ll_rw_blk.c

/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/blktrace_api.h>

/*
 * for max sense size
 */
#include <scsi/scsi_cmnd.h>

static void blk_unplug_work(void *data);
static void blk_unplug_timeout(unsigned long data);
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
static void init_request_from_bio(struct request *req, struct bio *bio);
static int __make_request(request_queue_t *q, struct bio *bio);

/*
 * For the allocated request tables
 */
static kmem_cache_t *request_cachep;

/*
 * For queue allocation
 */
static kmem_cache_t *requestq_cachep;

/*
 * For io context allocations
 */
static kmem_cache_t *iocontext_cachep;

static wait_queue_head_t congestion_wqh[2] = {
	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
};

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

unsigned long blk_max_low_pfn, blk_max_pfn;

EXPORT_SYMBOL(blk_max_low_pfn);
EXPORT_SYMBOL(blk_max_pfn);

static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested. It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

static void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}
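/*
 * Worked example (added commentary, not in the original source): with the
 * default of q->nr_requests == BLKDEV_MAX_RQ == 128, the code above gives
 *
 *	nr_congestion_on  = 128 - 128/8 + 1          = 113
 *	nr_congestion_off = 128 - 128/8 - 128/16 - 1 = 103
 *
 * so the queue is flagged congested once 113 requests are in use, and the
 * flag is not cleared until usage drops below 103 - roughly ten requests
 * of hysteresis between the two states.
 */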
/*
 * A queue has just exited congestion. Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static void clear_queue_congested(request_queue_t *q, int rw)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[rw];

	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
	clear_bit(bit, &q->backing_dev_info.state);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

/*
 * A queue has just entered congestion. Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static void set_queue_congested(request_queue_t *q, int rw)
{
	enum bdi_state bit;

	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
	set_bit(bit, &q->backing_dev_info.state);
}

/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info
 *
 * Will return NULL if the request queue cannot be located.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	request_queue_t *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;
	return ret;
}

EXPORT_SYMBOL(blk_get_backing_dev_info);
void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
{
	q->activity_fn = fn;
	q->activity_data = data;
}

EXPORT_SYMBOL(blk_queue_activity_fn);

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:		queue
 * @pfn:	prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can, for instance, be
 * used to build a cdb from the request data.
 */
void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}

EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:		queue
 * @mbfn:	merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bio's sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bio's. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}

EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}

EXPORT_SYMBOL(blk_queue_softirq_done);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready. This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them. This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;
	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
	q->make_request_fn = mfn;
	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
	blk_queue_hardsect_size(q, 512);
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	INIT_WORK(&q->unplug_work, blk_unplug_work, q);

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);

	blk_queue_activity_fn(q, NULL, NULL);
}

EXPORT_SYMBOL(blk_queue_make_request);
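/*
 * Usage sketch (added for illustration, not part of the original file): a
 * hypothetical bio-based virtual driver, in the md/lvm style described in
 * the kernel-doc above, would hook its own make_request function roughly
 * like this. "mydev_make_request" and "mydev_init_queue" are invented
 * names; blk_alloc_queue() and blk_queue_make_request() are the real APIs.
 */
#if 0	/* example only */
static int mydev_make_request(request_queue_t *q, struct bio *bio)
{
	/* handle the bio directly, bypassing the request queue entirely;
	 * here we just complete it successfully, null-device style */
	bio_endio(bio, bio->bi_size, 0);
	return 0;
}

static request_queue_t *mydev_init_queue(void)
{
	request_queue_t *q = blk_alloc_queue(GFP_KERNEL);

	if (q)
		blk_queue_make_request(q, mydev_make_request);
	return q;
}
#endif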
static inline void rq_init(request_queue_t *q, struct request *rq)
{
	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->donelist);

	rq->errors = 0;
	rq->rq_status = RQ_ACTIVE;
	rq->bio = rq->biotail = NULL;
	rq->ioprio = 0;
	rq->buffer = NULL;
	rq->ref_count = 1;
	rq->q = q;
	rq->waiting = NULL;
	rq->special = NULL;
	rq->data_len = 0;
	rq->data = NULL;
	rq->nr_phys_segments = 0;
	rq->sense = NULL;
	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->completion_data = NULL;
}

/**
 * blk_queue_ordered - does this queue support ordered writes
 * @q:        the request queue
 * @ordered:  one of QUEUE_ORDERED_*
 * @prepare_flush_fn: rq setup helper for cache flush ordered writes
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win. Block drivers supporting this
 *   feature should call this function and indicate so.
 *
 **/
int blk_queue_ordered(request_queue_t *q, unsigned ordered,
		      prepare_flush_fn *prepare_flush_fn)
{
	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
	    prepare_flush_fn == NULL) {
		printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
		return -EINVAL;
	}

	if (ordered != QUEUE_ORDERED_NONE &&
	    ordered != QUEUE_ORDERED_DRAIN &&
	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
	    ordered != QUEUE_ORDERED_TAG &&
	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
	    ordered != QUEUE_ORDERED_TAG_FUA) {
		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
		return -EINVAL;
	}

	q->ordered = ordered;
	q->next_ordered = ordered;
	q->prepare_flush_fn = prepare_flush_fn;

	return 0;
}

EXPORT_SYMBOL(blk_queue_ordered);
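/*
 * Usage sketch (added; not in the original source): a driver for a disk
 * with a write-back cache could enable drain-based ordering with cache
 * flushes as below. "mydev_prepare_flush" is an invented name; the
 * prepare_flush_fn signature matches how q->prepare_flush_fn is invoked
 * from queue_flush() later in this file, and 0x35 is the SCSI
 * SYNCHRONIZE CACHE (10) opcode.
 */
#if 0	/* example only */
static void mydev_prepare_flush(request_queue_t *q, struct request *rq)
{
	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->flags |= REQ_BLOCK_PC;
	rq->timeout = 60 * HZ;
	rq->cmd[0] = 0x35;	/* SYNCHRONIZE CACHE (10) */
}

static void mydev_setup_ordering(request_queue_t *q)
{
	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydev_prepare_flush);
}
#endif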
/**
 * blk_queue_issue_flush_fn - set function for issuing a flush
 * @q:     the request queue
 * @iff:   the function to be called issuing the flush
 *
 * Description:
 *   If a driver supports issuing a flush command, it notifies the block
 *   layer of that support by registering the function through this call.
 *
 **/
void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff)
{
	q->issue_flush_fn = iff;
}

EXPORT_SYMBOL(blk_queue_issue_flush_fn);

/*
 * Cache flushing for ordered writes handling
 */
inline unsigned blk_ordered_cur_seq(request_queue_t *q)
{
	if (!q->ordseq)
		return 0;
	return 1 << ffz(q->ordseq);
}
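/*
 * Note on the encoding (added commentary, assuming the usual bit layout
 * of the QUEUE_ORDSEQ_* constants in blkdev.h): q->ordseq is a bitmask of
 * the ordered-sequence stages already completed, so ffz() (find first
 * zero bit) picks out the lowest stage still pending and "1 << ffz()"
 * names it. For example, once STARTED, DRAIN and PREFLUSH are all set,
 * ffz() lands on the BAR bit, i.e. the barrier write is the current stage.
 */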
unsigned blk_ordered_req_seq(struct request *rq)
{
	request_queue_t *q = rq->q;

	BUG_ON(q->ordseq == 0);

	if (rq == &q->pre_flush_rq)
		return QUEUE_ORDSEQ_PREFLUSH;
	if (rq == &q->bar_rq)
		return QUEUE_ORDSEQ_BAR;
	if (rq == &q->post_flush_rq)
		return QUEUE_ORDSEQ_POSTFLUSH;

	if ((rq->flags & REQ_ORDERED_COLOR) ==
	    (q->orig_bar_rq->flags & REQ_ORDERED_COLOR))
		return QUEUE_ORDSEQ_DRAIN;
	else
		return QUEUE_ORDSEQ_DONE;
}

void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error)
{
	struct request *rq;
	int uptodate;

	if (error && !q->orderr)
		q->orderr = error;

	BUG_ON(q->ordseq & seq);
	q->ordseq |= seq;

	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
		return;

	/*
	 * Okay, sequence complete.
	 */
	rq = q->orig_bar_rq;
	uptodate = q->orderr ? q->orderr : 1;

	q->ordseq = 0;

	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
	end_that_request_last(rq, uptodate);
}

static void pre_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

static void queue_flush(request_queue_t *q, unsigned which)
{
	struct request *rq;
	rq_end_io_fn *end_io;

	if (which == QUEUE_ORDERED_PREFLUSH) {
		rq = &q->pre_flush_rq;
		end_io = pre_flush_end_io;
	} else {
		rq = &q->post_flush_rq;
		end_io = post_flush_end_io;
	}

	rq_init(q, rq);
	rq->flags = REQ_HARDBARRIER;
	rq->elevator_private = NULL;
	rq->rq_disk = q->bar_rq.rq_disk;
	rq->rl = NULL;
	rq->end_io = end_io;
	q->prepare_flush_fn(q, rq);

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

static inline struct request *start_ordered(request_queue_t *q,
					    struct request *rq)
{
	q->bi_size = 0;
	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * Prep proxy barrier request.
	 */
	blkdev_dequeue_request(rq);
	q->orig_bar_rq = rq;
	rq = &q->bar_rq;
	rq_init(q, rq);
	rq->flags = bio_data_dir(q->orig_bar_rq->bio);
	rq->flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
	rq->elevator_private = NULL;
	rq->rl = NULL;
	init_request_from_bio(rq, q->orig_bar_rq->bio);
	rq->end_io = bar_end_io;

	/*
	 * Queue ordered sequence. As we stack them at the head, we
	 * need to queue in reverse order. Note that we rely on the fact
	 * that no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
	 * request gets in between the ordered sequence.
	 */
	if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
		queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
	else
		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);

	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;

	if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
	else
		rq = NULL;

	return rq;
}
int blk_do_ordered(request_queue_t *q, struct request **rqp)
{
	struct request *rq = *rqp;
	int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);

	if (!q->ordseq) {
		if (!is_barrier)
			return 1;

		if (q->next_ordered != QUEUE_ORDERED_NONE) {
			*rqp = start_ordered(q, rq);
			return 1;
		} else {
			/*
			 * This can happen when the queue switches to
			 * ORDERED_NONE while this request is on it.
			 */
			blkdev_dequeue_request(rq);
			end_that_request_first(rq, -EOPNOTSUPP,
					       rq->hard_nr_sectors);
			end_that_request_last(rq, -EOPNOTSUPP);
			*rqp = NULL;
			return 0;
		}
	}

	/*
	 * Ordered sequence in progress
	 */

	/* Special requests are not subject to ordering rules. */
	if (!blk_fs_request(rq) &&
	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
		return 1;

	if (q->ordered & QUEUE_ORDERED_TAG) {
		/* Ordered by tag. Blocking the next barrier is enough. */
		if (is_barrier && rq != &q->bar_rq)
			*rqp = NULL;
	} else {
		/* Ordered by draining. Wait for turn. */
		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
			*rqp = NULL;
	}

	return 1;
}

static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
{
	request_queue_t *q = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	/*
	 * This is a dry run; restore bi_sector and size. We'll finish
	 * this request again with the original bi_end_io after an
	 * error occurs or post flush is complete.
	 */
	q->bi_size += bytes;

	if (bio->bi_size)
		return 1;

	/* Rewind bvec's */
	bio->bi_idx = 0;
	bio_for_each_segment(bvec, bio, i) {
		bvec->bv_len += bvec->bv_offset;
		bvec->bv_offset = 0;
	}

	/* Reset bio */
	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio->bi_size = q->bi_size;
	bio->bi_sector -= (q->bi_size >> 9);
	q->bi_size = 0;

	return 0;
}

static inline int ordered_bio_endio(struct request *rq, struct bio *bio,
				    unsigned int nbytes, int error)
{
	request_queue_t *q = rq->q;
	bio_end_io_t *endio;
	void *private;

	if (&q->bar_rq != rq)
		return 0;

	/*
	 * Okay, this is the barrier request in progress, dry finish it.
	 */
	if (error && !q->orderr)
		q->orderr = error;

	endio = bio->bi_end_io;
	private = bio->bi_private;
	bio->bi_end_io = flush_dry_bio_endio;
	bio->bi_private = q;

	bio_endio(bio, nbytes, error);

	bio->bi_end_io = endio;
	bio->bi_private = private;

	return 1;
}
/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q:  the request queue for the device
 * @dma_addr:   bus address limit
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_addr.
 **/
void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
{
	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/* Assume anything <= 4GB can be handled by IOMMU.
	   Actually some IOMMUs can handle everything, but I don't
	   know of a way to test this here. */
	if (bounce_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->bounce_pfn = max_low_pfn;
#else
	if (bounce_pfn < blk_max_low_pfn)
		dma = 1;
	q->bounce_pfn = bounce_pfn;
#endif

	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->bounce_pfn = bounce_pfn;
	}
}

EXPORT_SYMBOL(blk_queue_bounce_limit);
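/*
 * Usage sketch (added; not in the original source): a driver whose
 * hardware can only address 32 bits of bus memory would typically set
 * the limit during probe, so pages above 4GB get bounced into
 * DMA-reachable memory. A device (or IOMMU) that can reach all of
 * physical memory would pass BLK_BOUNCE_ANY instead.
 */
#if 0	/* example only */
	blk_queue_bounce_limit(q, 0xffffffffULL);	/* 32-bit DMA only */
#endif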
/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_sectors:  max sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of
 *    received requests.
 **/
void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
{
	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
	}

	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->max_hw_sectors = q->max_sectors = max_sectors;
	else {
		q->max_sectors = BLK_DEF_MAX_SECTORS;
		q->max_hw_sectors = max_sectors;
	}
}

EXPORT_SYMBOL(blk_queue_max_sectors);

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    physical data segments in a request. This would be the largest sized
 *    scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
	}

	q->max_phys_segments = max_segments;
}

EXPORT_SYMBOL(blk_queue_max_phys_segments);

/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request. This would be the largest number of
 *    address/length pairs the host adapter can actually give at once
 *    to the device.
 **/
void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
	}

	q->max_hw_segments = max_segments;
}

EXPORT_SYMBOL(blk_queue_max_hw_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
	}

	q->max_segment_size = max_size;
}

EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_hardsect_size - set hardware sector size for the queue
 * @q:  the request queue for the device
 * @size:  the hardware sector size, in bytes
 *
 * Description:
 *   This should typically be set to the lowest possible sector size
 *   that the hardware can operate on (without reverting to even
 *   internal read-modify-write operations). Usually the default
 *   of 512 covers most hardware.
 **/
void blk_queue_hardsect_size(request_queue_t *q, unsigned short size)
{
	q->hardsect_size = size;
}

EXPORT_SYMBOL(blk_queue_hardsect_size);

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
{
	/* zero is "infinity" */
	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);

	t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
}

EXPORT_SYMBOL(blk_queue_stack_limits);
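/*
 * Usage sketch (added; not from the original file): a stacking driver
 * such as md or dm walks its component devices and folds each one's
 * limits into its own queue, so the top queue never issues a request
 * the weakest underlying device cannot take. "ndevs", "top_queue" and
 * "component_bdev" are invented names for the example; bdev_get_queue()
 * is the real helper (used earlier in this file).
 */
#if 0	/* example only */
	int i;

	for (i = 0; i < ndevs; i++)
		blk_queue_stack_limits(top_queue,
				       bdev_get_queue(component_bdev[i]));
#endif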
/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
	}

	q->seg_boundary_mask = mask;
}

EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(request_queue_t *q, int mask)
{
	q->dma_alignment = mask;
}

EXPORT_SYMBOL(blk_queue_dma_alignment);
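/*
 * Note (added commentary): the mask is "alignment minus one", so a driver
 * whose DMA engine needs buffers and lengths aligned to 4 bytes would
 * pass 3. The 511 set in blk_queue_make_request() above likewise means
 * 512-byte alignment by default.
 */
#if 0	/* example only */
	blk_queue_dma_alignment(q, 0x3);	/* 4-byte aligned DMA */
#endif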
/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:	 The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(request_queue_t *q, int tag)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;

	return bqt->tag_index[tag];
}

EXPORT_SYMBOL(blk_queue_find_tag);

/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Tries to free the specified @bqt. Returns true if it was
 * actually freed and false if there are still references using it
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
	int retval;

	retval = atomic_dec_and_test(&bqt->refcnt);
	if (retval) {
		BUG_ON(bqt->busy);
		BUG_ON(!list_empty(&bqt->busy_list));

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}

	return retval;
}

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 * Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
static void __blk_queue_free_tags(request_queue_t *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	__blk_free_tags(bqt);

	q->queue_tags = NULL;
	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
}

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * For an externally managed @bqt, frees the map. Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (unlikely(!__blk_free_tags(bqt)))
		BUG();
}

EXPORT_SYMBOL(blk_free_tags);

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 * Notes:
 *	This is used to disable tagged queueing on a device, yet leave the
 *	queue in function.
 **/
void blk_queue_free_tags(request_queue_t *q)
{
	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
}

EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __FUNCTION__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	INIT_LIST_HEAD(&tags->busy_list);
	tags->busy = 0;
	atomic_set(&tags->refcnt, 1);
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
	return __blk_queue_init_tags(NULL, depth);
}

EXPORT_SYMBOL(blk_init_tags);

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
 **/
int blk_queue_init_tags(request_queue_t *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth);

		if (!tags)
			goto fail;
	} else if (q->queue_tags) {
		if ((rc = blk_queue_resize_tags(q, depth)))
			return rc;
		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
	return 0;
fail:
	kfree(tags);
	return -ENOMEM;
}

EXPORT_SYMBOL(blk_queue_init_tags);
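/*
 * Usage sketch (added; not in the original source): a driver for a
 * TCQ/NCQ-capable controller typically enables tagging once at init time
 * and then lets the block layer hand out tags. Depth 32 and the NULL tag
 * map (meaning "allocate a private map for this queue") are arbitrary
 * choices for the example; "mydev" is an invented name.
 */
#if 0	/* example only */
	if (blk_queue_init_tags(q, 32, NULL))
		printk(KERN_WARNING "mydev: tagged queueing disabled\n");
#endif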
/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 * Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(request_queue_t *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * If we already have a large enough real_max_depth, just
	 * adjust max_depth. *NOTE* as requests with tag values
	 * between new_depth and real_max_depth can be in flight, the
	 * tag map cannot be shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}

EXPORT_SYMBOL(blk_queue_resize_tags);

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 * Description:
 *    Typically called when end_that_request_first() returns 0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 * Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(request_queue_t *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag = rq->tag;

	BUG_ON(tag == -1);

	if (unlikely(tag >= bqt->real_max_depth))
		/*
		 * This can happen after tag depth has been reduced.
		 * FIXME: how about a warning or info message here?
		 */
		return;

	if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __FUNCTION__, tag);
		return;
	}

	list_del_init(&rq->queuelist);
	rq->flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __FUNCTION__, tag);

	bqt->tag_index[tag] = NULL;
	bqt->busy--;
}

EXPORT_SYMBOL(blk_queue_end_tag);

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 * Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function. The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 * Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(request_queue_t *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag;

	if (unlikely((rq->flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __FUNCTION__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
	if (tag >= bqt->max_depth)
		return 1;

	__set_bit(tag, bqt->tag_map);

	rq->flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blkdev_dequeue_request(rq);
	list_add(&rq->queuelist, &bqt->busy_list);
	bqt->busy++;
	return 0;
}

EXPORT_SYMBOL(blk_queue_start_tag);
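/*
 * Usage sketch (added; not from the original file): a typical tagged
 * request_fn pairs blk_queue_start_tag() with blk_queue_end_tag() in the
 * completion path, both under the queue lock. "mydev_dispatch" is an
 * invented helper; elv_next_request() is the real dispatch API.
 */
#if 0	/* example only */
static void mydev_request_fn(request_queue_t *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (blk_queue_start_tag(q, rq))
			break;		/* out of tags, try again later */
		mydev_dispatch(rq);	/* hand rq->tag to the hardware */
	}
}

	/* later, in the completion path, with the queue lock held: */
	blk_queue_end_tag(q, rq);
	end_that_request_last(rq, uptodate);
#endif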
  1007. /**
  1008. * blk_queue_invalidate_tags - invalidate all pending tags
  1009. * @q: the request queue for the device
  1010. *
  1011. * Description:
  1012. * Hardware conditions may dictate a need to stop all pending requests.
  1013. * In this case, we will safely clear the block side of the tag queue and
  1014. * readd all requests to the request queue in the right order.
  1015. *
  1016. * Notes:
  1017. * queue lock must be held.
  1018. **/
  1019. void blk_queue_invalidate_tags(request_queue_t *q)
  1020. {
  1021. struct blk_queue_tag *bqt = q->queue_tags;
  1022. struct list_head *tmp, *n;
  1023. struct request *rq;
  1024. list_for_each_safe(tmp, n, &bqt->busy_list) {
  1025. rq = list_entry_rq(tmp);
  1026. if (rq->tag == -1) {
  1027. printk(KERN_ERR
  1028. "%s: bad tag found on list\n", __FUNCTION__);
  1029. list_del_init(&rq->queuelist);
  1030. rq->flags &= ~REQ_QUEUED;
  1031. } else
  1032. blk_queue_end_tag(q, rq);
  1033. rq->flags &= ~REQ_STARTED;
  1034. __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
  1035. }
  1036. }
  1037. EXPORT_SYMBOL(blk_queue_invalidate_tags);
  1038. static const char * const rq_flags[] = {
  1039. "REQ_RW",
  1040. "REQ_FAILFAST",
  1041. "REQ_SORTED",
  1042. "REQ_SOFTBARRIER",
  1043. "REQ_HARDBARRIER",
  1044. "REQ_FUA",
  1045. "REQ_CMD",
  1046. "REQ_NOMERGE",
  1047. "REQ_STARTED",
  1048. "REQ_DONTPREP",
  1049. "REQ_QUEUED",
  1050. "REQ_ELVPRIV",
  1051. "REQ_PC",
  1052. "REQ_BLOCK_PC",
  1053. "REQ_SENSE",
  1054. "REQ_FAILED",
  1055. "REQ_QUIET",
  1056. "REQ_SPECIAL",
  1057. "REQ_DRIVE_CMD",
  1058. "REQ_DRIVE_TASK",
  1059. "REQ_DRIVE_TASKFILE",
  1060. "REQ_PREEMPT",
  1061. "REQ_PM_SUSPEND",
  1062. "REQ_PM_RESUME",
  1063. "REQ_PM_SHUTDOWN",
  1064. "REQ_ORDERED_COLOR",
  1065. };
  1066. void blk_dump_rq_flags(struct request *rq, char *msg)
  1067. {
  1068. int bit;
  1069. printk("%s: dev %s: flags = ", msg,
  1070. rq->rq_disk ? rq->rq_disk->disk_name : "?");
  1071. bit = 0;
  1072. do {
  1073. if (rq->flags & (1 << bit))
  1074. printk("%s ", rq_flags[bit]);
  1075. bit++;
  1076. } while (bit < __REQ_NR_BITS);
  1077. printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
  1078. rq->nr_sectors,
  1079. rq->current_nr_sectors);
  1080. printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
  1081. if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
  1082. printk("cdb: ");
  1083. for (bit = 0; bit < sizeof(rq->cmd); bit++)
  1084. printk("%02x ", rq->cmd[bit]);
  1085. printk("\n");
  1086. }
  1087. }
  1088. EXPORT_SYMBOL(blk_dump_rq_flags);
  1089. void blk_recount_segments(request_queue_t *q, struct bio *bio)
  1090. {
  1091. struct bio_vec *bv, *bvprv = NULL;
  1092. int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
  1093. int high, highprv = 1;
  1094. if (unlikely(!bio->bi_io_vec))
  1095. return;
  1096. cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
  1097. hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
  1098. bio_for_each_segment(bv, bio, i) {
  1099. /*
  1100. * the trick here is making sure that a high page is never
  1101. * considered part of another segment, since that might
  1102. * change with the bounce page.
  1103. */
  1104. high = page_to_pfn(bv->bv_page) >= q->bounce_pfn;
  1105. if (high || highprv)
  1106. goto new_hw_segment;
  1107. if (cluster) {
  1108. if (seg_size + bv->bv_len > q->max_segment_size)
  1109. goto new_segment;
  1110. if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
  1111. goto new_segment;
  1112. if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
  1113. goto new_segment;
  1114. if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
  1115. goto new_hw_segment;
  1116. seg_size += bv->bv_len;
  1117. hw_seg_size += bv->bv_len;
  1118. bvprv = bv;
  1119. continue;
  1120. }
  1121. new_segment:
  1122. if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
  1123. !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
  1124. hw_seg_size += bv->bv_len;
  1125. } else {
  1126. new_hw_segment:
  1127. if (hw_seg_size > bio->bi_hw_front_size)
  1128. bio->bi_hw_front_size = hw_seg_size;
  1129. hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
  1130. nr_hw_segs++;
  1131. }
  1132. nr_phys_segs++;
  1133. bvprv = bv;
  1134. seg_size = bv->bv_len;
  1135. highprv = high;
  1136. }
  1137. if (hw_seg_size > bio->bi_hw_back_size)
  1138. bio->bi_hw_back_size = hw_seg_size;
  1139. if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
  1140. bio->bi_hw_front_size = hw_seg_size;
  1141. bio->bi_phys_segments = nr_phys_segs;
  1142. bio->bi_hw_segments = nr_hw_segs;
  1143. bio->bi_flags |= (1 << BIO_SEG_VALID);
  1144. }
  1145. static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
  1146. struct bio *nxt)
  1147. {
  1148. if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
  1149. return 0;
  1150. if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
  1151. return 0;
  1152. if (bio->bi_size + nxt->bi_size > q->max_segment_size)
  1153. return 0;
  1154. /*
  1155. * bio and nxt are contigous in memory, check if the queue allows
  1156. * these two to be merged into one
  1157. */
  1158. if (BIO_SEG_BOUNDARY(q, bio, nxt))
  1159. return 1;
  1160. return 0;
  1161. }
static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
				 struct bio *nxt)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);
	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
		blk_recount_segments(q, nxt);
	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_front_size + bio->bi_hw_back_size))
		return 0;
	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
		return 0;

	return 1;
}
/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
{
	struct bio_vec *bvec, *bvprv;
	struct bio *bio;
	int nsegs, i, cluster;

	nsegs = 0;
	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	rq_for_each_bio(bio, rq) {
		/*
		 * for each segment in bio
		 */
		bio_for_each_segment(bvec, bio, i) {
			int nbytes = bvec->bv_len;

			if (bvprv && cluster) {
				if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
					goto new_segment;

				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
					goto new_segment;

				sg[nsegs - 1].length += nbytes;
			} else {
new_segment:
				memset(&sg[nsegs], 0, sizeof(struct scatterlist));
				sg[nsegs].page = bvec->bv_page;
				sg[nsegs].length = nbytes;
				sg[nsegs].offset = bvec->bv_offset;

				nsegs++;
			}
			bvprv = bvec;
		} /* segments in bio */
	} /* bios in rq */

	return nsegs;
}

EXPORT_SYMBOL(blk_rq_map_sg);
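
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * A typical driver calls blk_rq_map_sg() from its request_fn to build a
 * DMA scatterlist; "my_dev" and its pre-allocated "sg" table (sized for
 * q->max_phys_segments entries, per the comment above) are hypothetical:
 *
 *	static void my_request_fn(request_queue_t *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			int nents = blk_rq_map_sg(q, rq, my_dev->sg);
 *			// hand 'nents' scatterlist entries to the hardware
 *		}
 *	}
 */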
/*
 * the standard queue merge functions, can be overridden with device
 * specific ones if so desired
 */
static inline int ll_new_mergeable(request_queue_t *q,
				   struct request *req,
				   struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
		req->flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}

	/*
	 * A hw segment is just getting larger, bump just the phys
	 * counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}
static inline int ll_new_hw_segment(request_queue_t *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_hw_segs = bio_hw_segments(q, bio);
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
		req->flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_hw_segments += nr_hw_segs;
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}
static int ll_back_merge_fn(request_queue_t *q, struct request *req,
			    struct bio *bio)
{
	unsigned short max_sectors;
	int len;

	if (unlikely(blk_pc_request(req)))
		max_sectors = q->max_hw_sectors;
	else
		max_sectors = q->max_sectors;

	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
		req->flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
		blk_recount_segments(q, req->biotail);
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);
	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
	    !BIOVEC_VIRT_OVERSIZE(len)) {
		int mergeable = ll_new_mergeable(q, req, bio);

		if (mergeable) {
			if (req->nr_hw_segments == 1)
				req->bio->bi_hw_front_size = len;
			if (bio->bi_hw_segments == 1)
				bio->bi_hw_back_size = len;
		}
		return mergeable;
	}

	return ll_new_hw_segment(q, req, bio);
}
static int ll_front_merge_fn(request_queue_t *q, struct request *req,
			     struct bio *bio)
{
	unsigned short max_sectors;
	int len;

	if (unlikely(blk_pc_request(req)))
		max_sectors = q->max_hw_sectors;
	else
		max_sectors = q->max_sectors;

	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
		req->flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);
	if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
		blk_recount_segments(q, req->bio);
	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
	    !BIOVEC_VIRT_OVERSIZE(len)) {
		int mergeable = ll_new_mergeable(q, req, bio);

		if (mergeable) {
			if (bio->bi_hw_segments == 1)
				bio->bi_hw_front_size = len;
			if (req->nr_hw_segments == 1)
				req->biotail->bi_hw_back_size = len;
		}
		return mergeable;
	}

	return ll_new_hw_segment(q, req, bio);
}
static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	int total_hw_segments;

	/*
	 * First check if either of the requests are re-queued
	 * requests.  Can't merge them if they are.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio))
		total_phys_segments--;

	if (total_phys_segments > q->max_phys_segments)
		return 0;

	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
		int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
		/*
		 * propagate the combined length to the end of the requests
		 */
		if (req->nr_hw_segments == 1)
			req->bio->bi_hw_front_size = len;
		if (next->nr_hw_segments == 1)
			next->biotail->bi_hw_back_size = len;
		total_hw_segments--;
	}

	if (total_hw_segments > q->max_hw_segments)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	req->nr_hw_segments = total_hw_segments;
	return 1;
}
/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off and no requests on the queue and
 * with the queue lock held.
 */
void blk_plug_device(request_queue_t *q)
{
	WARN_ON(!irqs_disabled());

	/*
	 * don't plug a stopped queue, it must be paired with blk_start_queue()
	 * which will restart the queueing
	 */
	if (blk_queue_stopped(q))
		return;

	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
	}
}

EXPORT_SYMBOL(blk_plug_device);
/*
 * remove the queue from the plugged list, if present. called with
 * queue lock held and interrupts disabled.
 */
int blk_remove_plug(request_queue_t *q)
{
	WARN_ON(!irqs_disabled());

	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
		return 0;

	del_timer(&q->unplug_timer);
	return 1;
}

EXPORT_SYMBOL(blk_remove_plug);

/*
 * remove the plug and let it rip..
 */
void __generic_unplug_device(request_queue_t *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	if (!blk_remove_plug(q))
		return;

	q->request_fn(q);
}
EXPORT_SYMBOL(__generic_unplug_device);
/**
 * generic_unplug_device - fire a request queue
 * @q:    The &request_queue_t in question
 *
 * Description:
 *   Linux uses plugging to build bigger requests queues before letting
 *   the device have at them. If a queue is plugged, the I/O scheduler
 *   is still adding and merging requests on the queue. Once the queue
 *   gets unplugged, the request_fn defined for the queue is invoked and
 *   transfers started.
 **/
void generic_unplug_device(request_queue_t *q)
{
	spin_lock_irq(q->queue_lock);
	__generic_unplug_device(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(generic_unplug_device);
static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
				   struct page *page)
{
	request_queue_t *q = bdi->unplug_io_data;

	/*
	 * devices don't necessarily have an ->unplug_fn defined
	 */
	if (q->unplug_fn) {
		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
					q->rq.count[READ] + q->rq.count[WRITE]);

		q->unplug_fn(q);
	}
}

static void blk_unplug_work(void *data)
{
	request_queue_t *q = data;

	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
				q->rq.count[READ] + q->rq.count[WRITE]);

	q->unplug_fn(q);
}

static void blk_unplug_timeout(unsigned long data)
{
	request_queue_t *q = (request_queue_t *)data;

	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
				q->rq.count[READ] + q->rq.count[WRITE]);

	kblockd_schedule_work(&q->unplug_work);
}
/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &request_queue_t in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(request_queue_t *q)
{
	WARN_ON(!irqs_disabled());

	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);

	/*
	 * one level of recursion is ok and is much faster than kicking
	 * the unplug handling
	 */
	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
		q->request_fn(q);
		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
	} else {
		blk_plug_device(q);
		kblockd_schedule_work(&q->unplug_work);
	}
}

EXPORT_SYMBOL(blk_start_queue);
/**
 * blk_stop_queue - stop a queue
 * @q:    The &request_queue_t in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(request_queue_t *q)
{
	blk_remove_plug(q);
	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
}
EXPORT_SYMBOL(blk_stop_queue);
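
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The usual pairing, for a hypothetical driver: on a 'queue full'
 * condition the request_fn (called with the queue lock held) stops the
 * queue, and the completion interrupt restarts it later:
 *
 *	// in my_request_fn(), queue lock already held:
 *	if (hardware_queue_full(my_dev)) {
 *		blk_stop_queue(q);
 *		return;
 *	}
 *
 *	// in the interrupt handler, once slots free up:
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_start_queue(q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */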
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->unplug_timer);
	kblockd_flush();
}
EXPORT_SYMBOL(blk_sync_queue);
/**
 * blk_run_queue - run a single device queue
 * @q:	The queue to run
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_remove_plug(q);

	/*
	 * Only recurse once to avoid overrunning the stack, let the unplug
	 * handling reinvoke the handler shortly if we already got there.
	 */
	if (!elv_queue_empty(q)) {
		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
			q->request_fn(q);
			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
		} else {
			blk_plug_device(q);
			kblockd_schedule_work(&q->unplug_work);
		}
	}

	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
/**
 * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
 * @kobj:    the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_cleanup_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request(). It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
	struct request_list *rl = &q->rq;

	blk_sync_queue(q);

	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_trace_shutdown(q);

	kmem_cache_free(requestq_cachep, q);
}
void blk_put_queue(request_queue_t *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_cleanup_queue(request_queue_t * q)
{
	mutex_lock(&q->sysfs_lock);
	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
	mutex_unlock(&q->sysfs_lock);

	if (q->elevator)
		elevator_exit(q->elevator);

	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
static int blk_init_free_list(request_queue_t *q)
{
	struct request_list *rl = &q->rq;

	rl->count[READ] = rl->count[WRITE] = 0;
	rl->starved[READ] = rl->starved[WRITE] = 0;
	rl->elvpriv = 0;
	init_waitqueue_head(&rl->wait[READ]);
	init_waitqueue_head(&rl->wait[WRITE]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				mempool_free_slab, request_cachep, q->node);

	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}
request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, -1);
}
EXPORT_SYMBOL(blk_alloc_queue);

static struct kobj_type queue_ktype;

request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	request_queue_t *q;

	q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
	if (!q)
		return NULL;

	memset(q, 0, sizeof(*q));
	init_timer(&q->unplug_timer);

	snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
	q->kobj.ktype = &queue_ktype;
	kobject_init(&q->kobj);

	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
	q->backing_dev_info.unplug_io_data = q;

	mutex_init(&q->sysfs_lock);

	return q;
}
EXPORT_SYMBOL(blk_alloc_queue_node);
/**
 * blk_init_queue  - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/
request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);
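
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * Minimal init/teardown pairing for a hypothetical driver "mydrv":
 *
 *	static spinlock_t mydrv_lock;
 *	static request_queue_t *mydrv_queue;
 *
 *	spin_lock_init(&mydrv_lock);
 *	mydrv_queue = blk_init_queue(mydrv_request_fn, &mydrv_lock);
 *	if (!mydrv_queue)
 *		return -ENOMEM;
 *	...
 *	blk_cleanup_queue(mydrv_queue);		// at module unload
 */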
request_queue_t *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);

	if (!q)
		return NULL;

	q->node = node_id;
	if (blk_init_free_list(q)) {
		kmem_cache_free(requestq_cachep, q);
		return NULL;
	}

	/*
	 * if caller didn't supply a lock, they get per-queue locking with
	 * our embedded lock
	 */
	if (!lock) {
		spin_lock_init(&q->__queue_lock);
		lock = &q->__queue_lock;
	}

	q->request_fn		= rfn;
	q->back_merge_fn	= ll_back_merge_fn;
	q->front_merge_fn	= ll_front_merge_fn;
	q->merge_requests_fn	= ll_merge_requests_fn;
	q->prep_rq_fn		= NULL;
	q->unplug_fn		= generic_unplug_device;
	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
	q->queue_lock		= lock;

	blk_queue_segment_boundary(q, 0xffffffff);

	blk_queue_make_request(q, __make_request);
	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);

	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);

	/*
	 * all done
	 */
	if (!elevator_init(q, NULL)) {
		blk_queue_congestion_threshold(q);
		return q;
	}

	blk_put_queue(q);
	return NULL;
}
EXPORT_SYMBOL(blk_init_queue_node);
int blk_get_queue(request_queue_t *q)
{
	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
		kobject_get(&q->kobj);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(blk_get_queue);

static inline void blk_free_request(request_queue_t *q, struct request *rq)
{
	if (rq->flags & REQ_ELVPRIV)
		elv_put_request(q, rq);
	mempool_free(rq, q->rq.rq_pool);
}

static inline struct request *
blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
		  int priv, gfp_t gfp_mask)
{
	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);

	if (!rq)
		return NULL;

	/*
	 * first three bits are identical in rq->flags and bio->bi_rw,
	 * see bio.h and blkdev.h
	 */
	rq->flags = rw;

	if (priv) {
		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
			mempool_free(rq, q->rq.rq_pool);
			return NULL;
		}
		rq->flags |= REQ_ELVPRIV;
	}

	return rq;
}
/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}
static void __freed_request(request_queue_t *q, int rw)
{
	struct request_list *rl = &q->rq;

	if (rl->count[rw] < queue_congestion_off_threshold(q))
		clear_queue_congested(q, rw);

	if (rl->count[rw] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[rw]))
			wake_up(&rl->wait[rw]);

		blk_clear_queue_full(q, rw);
	}
}

/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.  Called under q->queue_lock.
 */
static void freed_request(request_queue_t *q, int rw, int priv)
{
	struct request_list *rl = &q->rq;

	rl->count[rw]--;
	if (priv)
		rl->elvpriv--;

	__freed_request(q, rw);

	if (unlikely(rl->starved[rw ^ 1]))
		__freed_request(q, rw ^ 1);
}
#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
/*
 * Get a free request, queue_lock must be held.
 * Returns NULL on failure, with queue_lock held.
 * Returns !NULL on success, with queue_lock *not held*.
 */
static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
				   gfp_t gfp_mask)
{
	struct request *rq = NULL;
	struct request_list *rl = &q->rq;
	struct io_context *ioc = NULL;
	int may_queue, priv;

	may_queue = elv_may_queue(q, rw, bio);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[rw]+1 >= q->nr_requests) {
			ioc = current_io_context(GFP_ATOMIC);
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_queue_full(q, rw)) {
				ioc_set_batching(q, ioc);
				blk_set_queue_full(q, rw);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					goto out;
				}
			}
		}
		set_queue_congested(q, rw);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[rw] >= (3 * q->nr_requests / 2))
		goto out;

	rl->count[rw]++;
	rl->starved[rw] = 0;

	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	if (priv)
		rl->elvpriv++;

	spin_unlock_irq(q->queue_lock);

	rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
	if (unlikely(!rq)) {
		/*
		 * Allocation failed presumably due to memory. Undo anything
		 * we might have messed up.
		 *
		 * Allocating task should really be put onto the front of the
		 * wait queue, but this is pretty rare.
		 */
		spin_lock_irq(q->queue_lock);
		freed_request(q, rw, priv);

		/*
		 * in the very unlikely event that allocation failed and no
		 * requests for this direction were pending, mark us starved
		 * so that freeing of a request in the other direction will
		 * notice us. another possible fix would be to split the
		 * rq mempool into READ and WRITE
		 */
rq_starved:
		if (unlikely(rl->count[rw] == 0))
			rl->starved[rw] = 1;

		goto out;
	}

	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	rq_init(q, rq);
	rq->rl = rl;

	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
out:
	return rq;
}
/*
 * No available requests for this queue, unplug the device and wait for some
 * requests to become available.
 *
 * Called with q->queue_lock held, and returns with it unlocked.
 */
static struct request *get_request_wait(request_queue_t *q, int rw,
					struct bio *bio)
{
	struct request *rq;

	rq = get_request(q, rw, bio, GFP_NOIO);
	while (!rq) {
		DEFINE_WAIT(wait);
		struct request_list *rl = &q->rq;

		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
				TASK_UNINTERRUPTIBLE);

		rq = get_request(q, rw, bio, GFP_NOIO);

		if (!rq) {
			struct io_context *ioc;

			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);

			__generic_unplug_device(q);
			spin_unlock_irq(q->queue_lock);
			io_schedule();

			/*
			 * After sleeping, we become a "batching" process and
			 * will be able to allocate at least one request, and
			 * up to a big batch of them for a small period of time.
			 * See ioc_batching, ioc_set_batching
			 */
			ioc = current_io_context(GFP_NOIO);
			ioc_set_batching(q, ioc);

			spin_lock_irq(q->queue_lock);
		}
		finish_wait(&rl->wait[rw], &wait);
	}

	return rq;
}
struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	spin_lock_irq(q->queue_lock);
	if (gfp_mask & __GFP_WAIT) {
		rq = get_request_wait(q, rw, NULL);
	} else {
		rq = get_request(q, rw, NULL, gfp_mask);
		if (!rq)
			spin_unlock_irq(q->queue_lock);
	}
	/* q->queue_lock is unlocked at this point */

	return rq;
}
EXPORT_SYMBOL(blk_get_request);
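
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * With __GFP_WAIT the call sleeps until a request is available; without
 * it the caller must handle NULL:
 *
 *	struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	// GFP_KERNEL includes __GFP_WAIT, so rq is never NULL here
 *	...
 *	blk_put_request(rq);
 */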
/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(request_queue_t *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);

	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);
/**
 * blk_insert_request - insert a special request into a request queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 * @at_head:	insert request at head or tail of queue
 * @data:	private data
 *
 * Description:
 *    Many block devices need to execute commands asynchronously, so they don't
 *    block the whole kernel from preemption during request execution.  This is
 *    accomplished normally by inserting artificial requests tagged as
 *    REQ_SPECIAL into the corresponding request queue, and letting them be
 *    scheduled for actual execution by the request queue.
 *
 *    We have the option of inserting the head or the tail of the queue.
 *    Typically we use the tail for new ioctls and so forth.  We use the head
 *    of the queue for things like a QUEUE_FULL message from a device, or a
 *    host that is unable to accept a particular command.
 */
void blk_insert_request(request_queue_t *q, struct request *rq,
			int at_head, void *data)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
	unsigned long flags;

	/*
	 * tell I/O scheduler that this isn't a regular read/write (ie it
	 * must not attempt merges on this) and that it acts as a soft
	 * barrier
	 */
	rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER;

	rq->special = data;

	spin_lock_irqsave(q->queue_lock, flags);

	/*
	 * If command is tagged, release the tag
	 */
	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	drive_stat_acct(rq, rq->nr_sectors, 1);
	__elv_add_request(q, rq, where, 0);

	if (blk_queue_plugged(q))
		__generic_unplug_device(q);
	else
		q->request_fn(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);
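
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * A hypothetical driver queueing an out-of-band command at the head of
 * the queue ("MYDRV_RESET" and "my_data" are invented names):
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	rq->cmd[0] = MYDRV_RESET;
 *	blk_insert_request(q, rq, 1, my_data);	// 1 == at_head
 */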
/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @ubuf:	the user buffer
 * @len:	length of user data
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
		    unsigned int len)
{
	unsigned long uaddr;
	struct bio *bio;
	int reading;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
		bio = bio_map_user(q, NULL, uaddr, len, reading);
	else
		bio = bio_copy_user(q, uaddr, len, reading);

	if (!IS_ERR(bio)) {
		rq->bio = rq->biotail = bio;
		blk_rq_bio_prep(q, rq, bio);

		rq->buffer = rq->data = NULL;
		rq->data_len = len;
		return 0;
	}

	/*
	 * bio is the err-ptr
	 */
	return PTR_ERR(bio);
}
EXPORT_SYMBOL(blk_rq_map_user);
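
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * Typical REQ_BLOCK_PC passthrough: map the user buffer, execute, then
 * unmap the original bio (saved before execution, as required above):
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	rq->flags |= REQ_BLOCK_PC;
 *	err = blk_rq_map_user(q, rq, ubuf, len);
 *	if (!err) {
 *		struct bio *bio = rq->bio;
 *		err = blk_execute_rq(q, bd_disk, rq, 0);
 *		blk_rq_unmap_user(bio, len);
 *	}
 *	blk_put_request(rq);
 */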
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
			struct sg_iovec *iov, int iov_count)
{
	struct bio *bio;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	/* we don't allow misaligned data like bio_map_user() does.  If the
	 * user is using sg, they're expected to know the alignment constraints
	 * and respect them accordingly */
	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq) == READ);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	rq->bio = rq->biotail = bio;
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	rq->data_len = bio->bi_size;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	bio to be unmapped
 * @ulen:	length of user buffer
 *
 * Description:
 *    Unmap a bio previously mapped by blk_rq_map_user().
 */
int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 */
int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	bio = bio_map_kern(q, kbuf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	rq->bio = rq->biotail = bio;
	blk_rq_bio_prep(q, rq, bio);

	rq->buffer = rq->data = NULL;
	rq->data_len = len;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
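
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * Same pattern as blk_rq_map_user(), but for a kernel buffer; no unmap
 * call is needed here, since a bio built by bio_map_kern() is released
 * on request completion:
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	rq->flags |= REQ_BLOCK_PC;
 *	err = blk_rq_map_kern(q, rq, buf, buflen, GFP_KERNEL);
 *	if (!err)
 *		err = blk_execute_rq(q, bd_disk, rq, 0);
 *	blk_put_request(rq);
 */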
/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the io scheduler queue
 *    for execution.  Don't wait for completion.
 */
void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;

	rq->rq_disk = bd_disk;
	rq->flags |= REQ_NOMERGE;
	rq->end_io = done;
	WARN_ON(irqs_disabled());
	spin_lock_irq(q->queue_lock);
	__elv_add_request(q, rq, where, 1);
	__generic_unplug_device(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
/**
 * blk_execute_rq - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the io scheduler queue
 *    for execution and wait for completion.
 */
int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
		   struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	char sense[SCSI_SENSE_BUFFERSIZE];
	int err = 0;

	/*
	 * we need an extra reference to the request, so we can look at
	 * it after io completion
	 */
	rq->ref_count++;

	if (!rq->sense) {
		memset(sense, 0, sizeof(sense));
		rq->sense = sense;
		rq->sense_len = 0;
	}

	rq->waiting = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
	wait_for_completion(&wait);
	rq->waiting = NULL;

	if (rq->errors)
		err = -EIO;

	return err;
}
EXPORT_SYMBOL(blk_execute_rq);
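
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * Issuing a SCSI TEST UNIT READY (opcode 0x00) synchronously; the field
 * usage follows the REQ_BLOCK_PC conventions seen elsewhere in this file:
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	rq->flags |= REQ_BLOCK_PC;
 *	memset(rq->cmd, 0, sizeof(rq->cmd));	// opcode 0x00
 *	rq->cmd_len = 6;
 *	rq->timeout = 10 * HZ;
 *	err = blk_execute_rq(q, bd_disk, rq, 0);
 *	blk_put_request(rq);
 */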
/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.  Caller must run wait_for_completion() on its own.
 */
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
	request_queue_t *q;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;
	if (!q->issue_flush_fn)
		return -EOPNOTSUPP;

	return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
}
EXPORT_SYMBOL(blkdev_issue_flush);
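
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * Filesystems use this to force a write-back cache flush, treating
 * -EOPNOTSUPP as "device has no cache to flush":
 *
 *	sector_t error_sector;
 *	int err = blkdev_issue_flush(bdev, &error_sector);
 *	if (err && err != -EOPNOTSUPP)
 *		; // flush failed; error_sector may hold the offset
 */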
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
{
	int rw = rq_data_dir(rq);

	if (!blk_fs_request(rq) || !rq->rq_disk)
		return;

	if (!new_io) {
		__disk_stat_inc(rq->rq_disk, merges[rw]);
	} else {
		disk_round_stats(rq->rq_disk);
		rq->rq_disk->in_flight++;
	}
}

/*
 * add-request adds a request to the linked list.
 * queue lock is held and interrupts disabled, as we muck with the
 * request queue list.
 */
static inline void add_request(request_queue_t * q, struct request * req)
{
	drive_stat_acct(req, req->nr_sectors, 1);

	if (q->activity_fn)
		q->activity_fn(q->activity_data, rq_data_dir(req));

	/*
	 * elevator indicated where it wants this request to be
	 * inserted at elevator_merge time
	 */
	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
}
/*
 * disk_round_stats()	- Round off the performance stats on a struct
 *			  disk_stats.
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation.  To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats.  This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void disk_round_stats(struct gendisk *disk)
{
	unsigned long now = jiffies;

	if (now == disk->stamp)
		return;

	if (disk->in_flight) {
		__disk_stat_add(disk, time_in_queue,
				disk->in_flight * (now - disk->stamp));
		__disk_stat_add(disk, io_ticks, (now - disk->stamp));
	}
	disk->stamp = now;
}
EXPORT_SYMBOL_GPL(disk_round_stats);
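
/*
 * Editor's note, a worked example of the accounting above: if 4 requests
 * have been in flight for the 10 jiffies since disk->stamp was last
 * updated, this call adds 4 * 10 = 40 to time_in_queue and 10 to
 * io_ticks before restarting the counters at the current jiffies.
 */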
/*
 * queue lock must be held
 */
void __blk_put_request(request_queue_t *q, struct request *req)
{
	struct request_list *rl = req->rl;

	if (unlikely(!q))
		return;
	if (unlikely(--req->ref_count))
		return;

	elv_completed_request(q, req);

	req->rq_status = RQ_INACTIVE;
	req->rl = NULL;

	/*
	 * Request may not have originated from ll_rw_blk. if not,
	 * it didn't come out of our reserved rq pools
	 */
	if (rl) {
		int rw = rq_data_dir(req);
		int priv = req->flags & REQ_ELVPRIV;

		BUG_ON(!list_empty(&req->queuelist));

		blk_free_request(q, req);
		freed_request(q, rw, priv);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	unsigned long flags;
	request_queue_t *q = req->q;

	/*
	 * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
	 * following if (q) test.
	 */
	if (q) {
		spin_lock_irqsave(q->queue_lock, flags);
		__blk_put_request(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_put_request);
/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end io status of the request
 */
void blk_end_sync_rq(struct request *rq, int error)
{
	struct completion *waiting = rq->waiting;

	rq->waiting = NULL;
	__blk_put_request(rq->q, rq);

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}
EXPORT_SYMBOL(blk_end_sync_rq);
/**
 * blk_congestion_wait - wait for a queue to become uncongested
 * @rw: READ or WRITE
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion.
 * If no queues are congested then just wait for the next request to be
 * returned.
 */
long blk_congestion_wait(int rw, long timeout)
{
	long ret;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[rw];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);
	return ret;
}
EXPORT_SYMBOL(blk_congestion_wait);
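
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The writeback path uses this as a throttle, along the lines of:
 *
 *	while (still_over_dirty_threshold())
 *		blk_congestion_wait(WRITE, HZ / 10);
 *
 * "still_over_dirty_threshold" is an invented name for the caller's
 * retry condition.
 */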
/**
 * blk_congestion_end - wake up sleepers on a congestion queue
 * @rw: READ or WRITE
 */
void blk_congestion_end(int rw)
{
	wait_queue_head_t *wqh = &congestion_wqh[rw];

	if (waitqueue_active(wqh))
		wake_up(wqh);
}
/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(request_queue_t *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	/*
	 * not contiguous
	 */
	if (req->sector + req->nr_sectors != next->sector)
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->waiting || next->special)
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!q->merge_requests_fn(q, req, next))
		return 0;

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;

	elv_merge_requests(q, req, next);

	if (req->rq_disk) {
		disk_round_stats(req->rq_disk);
		req->rq_disk->in_flight--;
	}

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);

	__blk_put_request(q, next);
	return 1;
}

static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}
static void init_request_from_bio(struct request *req, struct bio *bio)
{
	req->flags |= REQ_CMD;

	/*
	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
	 */
	if (bio_rw_ahead(bio) || bio_failfast(bio))
		req->flags |= REQ_FAILFAST;

	/*
	 * REQ_BARRIER implies no merging, but let's make it explicit
	 */
	if (unlikely(bio_barrier(bio)))
		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);

	if (bio_sync(bio))
		req->flags |= REQ_RW_SYNC;

	req->errors = 0;
	req->hard_sector = req->sector = bio->bi_sector;
	req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
	req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
	req->nr_phys_segments = bio_phys_segments(req->q, bio);
	req->nr_hw_segments = bio_hw_segments(req->q, bio);
	req->buffer = bio_data(bio);	/* see ->buffer comment above */
	req->waiting = NULL;
	req->bio = req->biotail = bio;
	req->ioprio = bio_prio(bio);
	req->rq_disk = bio->bi_bdev->bd_disk;
	req->start_time = jiffies;
}
static int __make_request(request_queue_t *q, struct bio *bio)
{
	struct request *req;
	int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
	unsigned short prio;
	sector_t sector;

	sector = bio->bi_sector;
	nr_sectors = bio_sectors(bio);
	cur_nr_sectors = bio_cur_sectors(bio);
	prio = bio_prio(bio);

	rw = bio_data_dir(bio);
	sync = bio_sync(bio);

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	spin_lock_prefetch(q->queue_lock);

	barrier = bio_barrier(bio);
	if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	spin_lock_irq(q->queue_lock);

	if (unlikely(barrier) || elv_queue_empty(q))
		goto get_rq;

	el_ret = elv_merge(q, &req, bio);
	switch (el_ret) {
		case ELEVATOR_BACK_MERGE:
			BUG_ON(!rq_mergeable(req));

			if (!q->back_merge_fn(q, req, bio))
				break;

			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);

			req->biotail->bi_next = bio;
			req->biotail = bio;
			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
			req->ioprio = ioprio_best(req->ioprio, prio);
			drive_stat_acct(req, nr_sectors, 0);
			if (!attempt_back_merge(q, req))
				elv_merged_request(q, req);
			goto out;

		case ELEVATOR_FRONT_MERGE:
			BUG_ON(!rq_mergeable(req));

			if (!q->front_merge_fn(q, req, bio))
				break;

			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);

			bio->bi_next = req->bio;
			req->bio = bio;

			/*
			 * may not be valid. if the low level driver said
			 * it didn't need a bounce buffer then it better
			 * not touch req->buffer either...
			 */
			req->buffer = bio_data(bio);
			req->current_nr_sectors = cur_nr_sectors;
			req->hard_cur_sectors = cur_nr_sectors;
			req->sector = req->hard_sector = sector;
			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
			req->ioprio = ioprio_best(req->ioprio, prio);
			drive_stat_acct(req, nr_sectors, 0);
			if (!attempt_front_merge(q, req))
				elv_merged_request(q, req);
			goto out;

		/* ELV_NO_MERGE: elevator says don't/can't merge. */
		default:
			;
	}

get_rq:
	/*
	 * Grab a free request. This might sleep but can not fail.
	 * Returns with the queue unlocked.
	 */
	req = get_request_wait(q, rw, bio);

	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	init_request_from_bio(req, bio);

	spin_lock_irq(q->queue_lock);
	if (elv_queue_empty(q))
		blk_plug_device(q);
	add_request(q, req);
out:
	if (sync)
		__generic_unplug_device(q);

	spin_unlock_irq(q->queue_lock);
	return 0;

end_io:
	bio_endio(bio, nr_sectors << 9, err);
	return 0;
}
/*
 * If bio->bi_bdev is a partition, remap the location
 */
static inline void blk_partition_remap(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;

	if (bdev != bdev->bd_contains) {
		struct hd_struct *p = bdev->bd_part;
		const int rw = bio_data_dir(bio);

		p->sectors[rw] += bio_sectors(bio);
		p->ios[rw]++;

		bio->bi_sector += p->start_sect;
		bio->bi_bdev = bdev->bd_contains;
	}
}

static void handle_bad_sector(struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
			bdevname(bio->bi_bdev, b),
			bio->bi_rw,
			(unsigned long long)bio->bi_sector + bio_sectors(bio),
			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));

	set_bit(BIO_EOF, &bio->bi_flags);
}
/**
 * generic_make_request: hand a buffer to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct bio, which describes the I/O that needs
 * to be done.
 *
 * generic_make_request() does not return any status.  The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) elsewhere.
 *
 * The caller of generic_make_request must make sure that bi_io_vec
 * are set to describe the memory buffer, and that bi_bdev and bi_sector are
 * set to describe the device address, and the
 * bi_end_io and optionally bi_private are set to describe how
 * completion notification should be signaled.
 *
 * generic_make_request and the drivers it calls may use bi_next if this
 * bio happens to be merged with someone else, and may change bi_bdev and
 * bi_sector for remaps as it sees fit.  So the values of these fields
 * should NOT be depended on after the call to generic_make_request.
 */
void generic_make_request(struct bio *bio)
{
	request_queue_t *q;
	sector_t maxsector;
	int ret, nr_sectors = bio_sectors(bio);
	dev_t old_dev;

	might_sleep();
	/* Test device or partition size, when known. */
	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
	if (maxsector) {
		sector_t sector = bio->bi_sector;

		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
			/*
			 * This may well happen - the kernel calls bread()
			 * without checking the size of the device, e.g., when
			 * mounting a device.
			 */
			handle_bad_sector(bio);
			goto end_io;
		}
	}

	/*
	 * Resolve the mapping until finished. (drivers are
	 * still free to implement/resolve their own stacking
	 * by explicitly returning 0)
	 *
	 * NOTE: we don't repeat the blk_size check for each new device.
	 * Stacking drivers are expected to know what they are doing.
	 */
	maxsector = -1;
	old_dev = 0;
	do {
		char b[BDEVNAME_SIZE];

		q = bdev_get_queue(bio->bi_bdev);
		if (!q) {
			printk(KERN_ERR
			       "generic_make_request: Trying to access "
				"nonexistent block-device %s (%Lu)\n",
				bdevname(bio->bi_bdev, b),
				(long long) bio->bi_sector);
end_io:
			bio_endio(bio, bio->bi_size, -EIO);
			break;
		}

		if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
			printk("bio too big device %s (%u > %u)\n",
				bdevname(bio->bi_bdev, b),
				bio_sectors(bio),
				q->max_hw_sectors);
			goto end_io;
		}

		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
			goto end_io;

		/*
		 * If this device has partitions, remap block n
		 * of partition p to block n+start(p) of the disk.
		 */
		blk_partition_remap(bio);

		if (maxsector != -1)
			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
					    maxsector);

		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);

		maxsector = bio->bi_sector;
		old_dev = bio->bi_bdev->bd_dev;

		ret = q->make_request_fn(q, bio);
	} while (ret);
}
EXPORT_SYMBOL(generic_make_request);
  2680. /**
  2681. * submit_bio: submit a bio to the block device layer for I/O
  2682. * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
  2683. * @bio: The &struct bio which describes the I/O
  2684. *
  2685. * submit_bio() is very similar in purpose to generic_make_request(), and
  2686. * uses that function to do most of the work. Both are fairly rough
  2687. * interfaces, @bio must be presetup and ready for I/O.
  2688. *
  2689. */
void submit_bio(int rw, struct bio *bio)
{
	int count = bio_sectors(bio);

	BIO_BUG_ON(!bio->bi_size);
	BIO_BUG_ON(!bio->bi_io_vec);
	bio->bi_rw |= rw;
	if (rw & WRITE)
		count_vm_events(PGPGOUT, count);
	else
		count_vm_events(PGPGIN, count);

	if (unlikely(block_dump)) {
		char b[BDEVNAME_SIZE];
		printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
		       current->comm, current->pid,
		       (rw & WRITE) ? "WRITE" : "READ",
		       (unsigned long long)bio->bi_sector,
		       bdevname(bio->bi_bdev, b));
	}

	generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);

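/*
 * Illustrative sketch (added for exposition): submit_bio() is the normal
 * entry point; compared with calling generic_make_request() directly it
 * also folds @rw into bi_rw and does the PGPGIN/PGPGOUT accounting seen
 * above. Reusing the hypothetical setup from the previous sketch:
 */
#if 0
	/* instead of: bio->bi_rw |= READ; generic_make_request(bio); */
	submit_bio(READ, bio);
#endif
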
static void blk_recalc_rq_segments(struct request *rq)
{
	struct bio *bio, *prevbio = NULL;
	int nr_phys_segs, nr_hw_segs;
	unsigned int phys_size, hw_size;
	request_queue_t *q = rq->q;

	if (!rq->bio)
		return;

	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
	rq_for_each_bio(bio, rq) {
		/* Force bio hw/phys segs to be recalculated. */
		bio->bi_flags &= ~(1 << BIO_SEG_VALID);

		nr_phys_segs += bio_phys_segments(q, bio);
		nr_hw_segs += bio_hw_segments(q, bio);
		if (prevbio) {
			int pseg = phys_size + prevbio->bi_size + bio->bi_size;
			int hseg = hw_size + prevbio->bi_size + bio->bi_size;

			if (blk_phys_contig_segment(q, prevbio, bio) &&
			    pseg <= q->max_segment_size) {
				nr_phys_segs--;
				phys_size += prevbio->bi_size + bio->bi_size;
			} else
				phys_size = 0;

			if (blk_hw_contig_segment(q, prevbio, bio) &&
			    hseg <= q->max_segment_size) {
				nr_hw_segs--;
				hw_size += prevbio->bi_size + bio->bi_size;
			} else
				hw_size = 0;
		}
		prevbio = bio;
	}

	rq->nr_phys_segments = nr_phys_segs;
	rq->nr_hw_segments = nr_hw_segs;
}

static void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
	if (blk_fs_request(rq)) {
		rq->hard_sector += nsect;
		rq->hard_nr_sectors -= nsect;

		/*
		 * Move the I/O submission pointers ahead if required.
		 */
		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
		    (rq->sector <= rq->hard_sector)) {
			rq->sector = rq->hard_sector;
			rq->nr_sectors = rq->hard_nr_sectors;
			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
			rq->current_nr_sectors = rq->hard_cur_sectors;
			rq->buffer = bio_data(rq->bio);
		}

		/*
		 * if total number of sectors is less than the first segment
		 * size, something has gone terribly wrong
		 */
		if (rq->nr_sectors < rq->current_nr_sectors) {
			printk("blk: request botched\n");
			rq->nr_sectors = rq->current_nr_sectors;
		}
	}
}

static int __end_that_request_first(struct request *req, int uptodate,
				    int nr_bytes)
{
	int total_bytes, bio_nbytes, error, next_idx = 0;
	struct bio *bio;

	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);

	/*
	 * extend the uptodate bool to allow < 0 values to signal a
	 * direct I/O error
	 */
	error = 0;
	if (end_io_error(uptodate))
		error = !uptodate ? -EIO : uptodate;

	/*
	 * for a REQ_BLOCK_PC request, we want to carry any eventual
	 * sense key with us all the way through
	 */
	if (!blk_pc_request(req))
		req->errors = 0;

	if (!uptodate) {
		if (blk_fs_request(req) && !(req->flags & REQ_QUIET))
			printk("end_request: I/O error, dev %s, sector %llu\n",
				req->rq_disk ? req->rq_disk->disk_name : "?",
				(unsigned long long)req->sector);
	}

	if (blk_fs_request(req) && req->rq_disk) {
		const int rw = rq_data_dir(req);

		disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
	}

	total_bytes = bio_nbytes = 0;
	while ((bio = req->bio) != NULL) {
		int nbytes;

		if (nr_bytes >= bio->bi_size) {
			req->bio = bio->bi_next;
			nbytes = bio->bi_size;
			if (!ordered_bio_endio(req, bio, nbytes, error))
				bio_endio(bio, nbytes, error);
			next_idx = 0;
			bio_nbytes = 0;
		} else {
			int idx = bio->bi_idx + next_idx;

			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
				blk_dump_rq_flags(req, "__end_that");
				printk("%s: bio idx %d >= vcnt %d\n",
				       __FUNCTION__,
				       bio->bi_idx, bio->bi_vcnt);
				break;
			}

			nbytes = bio_iovec_idx(bio, idx)->bv_len;
			BIO_BUG_ON(nbytes > bio->bi_size);

			/*
			 * this bvec was not completed in full
			 */
			if (unlikely(nbytes > nr_bytes)) {
				bio_nbytes += nr_bytes;
				total_bytes += nr_bytes;
				break;
			}

			/*
			 * advance to the next vector
			 */
			next_idx++;
			bio_nbytes += nbytes;
		}

		total_bytes += nbytes;
		nr_bytes -= nbytes;

		if ((bio = req->bio)) {
			/*
			 * end more in this run, or just return 'not-done'
			 */
			if (unlikely(nr_bytes <= 0))
				break;
		}
	}

	/*
	 * completely done
	 */
	if (!req->bio)
		return 0;

	/*
	 * if the request wasn't completed, update state
	 */
	if (bio_nbytes) {
		if (!ordered_bio_endio(req, bio, bio_nbytes, error))
			bio_endio(bio, bio_nbytes, error);
		bio->bi_idx += next_idx;
		bio_iovec(bio)->bv_offset += nr_bytes;
		bio_iovec(bio)->bv_len -= nr_bytes;
	}

	blk_recalc_rq_sectors(req, total_bytes >> 9);
	blk_recalc_rq_segments(req);
	return 1;
}

/**
 * end_that_request_first - end I/O on a request
 * @req: the request being processed
 * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
 * @nr_sectors: number of sectors to end I/O on
 *
 * Description:
 *     Ends I/O on a number of sectors attached to @req, and sets it up
 *     for the next range of segments (if any) in the cluster.
 *
 * Return:
 *     0 - we are done with this request, call end_that_request_last()
 *     1 - still buffers pending for this request
 **/
int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
{
	return __end_that_request_first(req, uptodate, nr_sectors << 9);
}
EXPORT_SYMBOL(end_that_request_first);

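/*
 * Illustrative sketch (added for exposition): one common completion
 * pattern for a driver that has finished @nr_sectors of the current
 * request, following the 0/1 return convention documented above.
 * my_complete_sectors is a hypothetical helper; it must run with the
 * queue lock held, as end_that_request_last() requires.
 */
#if 0
static void my_complete_sectors(struct request *req, int uptodate,
				int nr_sectors)
{
	if (!end_that_request_first(req, uptodate, nr_sectors)) {
		/* no buffers pending: retire the request as a whole */
		blkdev_dequeue_request(req);
		end_that_request_last(req, uptodate);
	}
	/* else: more segments remain and the driver keeps transferring */
}
#endif
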
/**
 * end_that_request_chunk - end I/O on a request
 * @req: the request being processed
 * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, and sets it up
 *     for the next range of segments (if any). Like end_that_request_first(),
 *     but deals with bytes instead of sectors.
 *
 * Return:
 *     0 - we are done with this request, call end_that_request_last()
 *     1 - still buffers pending for this request
 **/
int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
{
	return __end_that_request_first(req, uptodate, nr_bytes);
}
EXPORT_SYMBOL(end_that_request_chunk);

/*
 * splice the completion data to a local structure and hand off to
 * the queue's softirq_done_fn to complete the requests
 */
static void blk_done_softirq(struct softirq_action *h)
{
	struct list_head *cpu_list, local_list;

	local_irq_disable();
	cpu_list = &__get_cpu_var(blk_cpu_done);
	list_replace_init(cpu_list, &local_list);
	local_irq_enable();

	while (!list_empty(&local_list)) {
		struct request *rq = list_entry(local_list.next, struct request, donelist);

		list_del_init(&rq->donelist);
		rq->q->softirq_done_fn(rq);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD) {
		int cpu = (unsigned long) hcpu;

		local_irq_disable();
		list_splice_init(&per_cpu(blk_cpu_done, cpu),
				 &__get_cpu_var(blk_cpu_done));
		raise_softirq_irqoff(BLOCK_SOFTIRQ);
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __devinitdata blk_cpu_notifier = {
	.notifier_call	= blk_cpu_notify,
};
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * blk_complete_request - end I/O on a request
 * @req: the request being processed
 *
 * Description:
 *     Ends all I/O on a request. It does not handle partial completions,
 *     unless the driver actually implements this in its completion callback
 *     through requeueing. The actual completion happens out-of-order,
 *     through a softirq handler. The user must have registered a completion
 *     callback through blk_queue_softirq_done().
 **/
void blk_complete_request(struct request *req)
{
	struct list_head *cpu_list;
	unsigned long flags;

	BUG_ON(!req->q->softirq_done_fn);

	local_irq_save(flags);

	cpu_list = &__get_cpu_var(blk_cpu_done);
	list_add_tail(&req->donelist, cpu_list);
	raise_softirq_irqoff(BLOCK_SOFTIRQ);

	local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_complete_request);

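/*
 * Illustrative sketch (added for exposition): the two halves of softirq
 * completion. A driver registers a done callback at queue-init time and
 * then calls blk_complete_request() from its hardware interrupt handler;
 * the heavy lifting runs later in BLOCK_SOFTIRQ context. my_softirq_done
 * and my_init_queue are hypothetical names.
 */
#if 0
static void my_softirq_done(struct request *req)
{
	request_queue_t *q = req->q;
	unsigned long flags;
	int uptodate = !req->errors;

	/* per-segment bookkeeping can run here, outside the hard IRQ */
	if (!end_that_request_first(req, uptodate, req->hard_nr_sectors)) {
		spin_lock_irqsave(q->queue_lock, flags);
		end_that_request_last(req, uptodate);	/* needs queue lock */
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static void my_init_queue(request_queue_t *q)
{
	blk_queue_softirq_done(q, my_softirq_done);
}

/* and from the hardware interrupt handler, once the device is done: */
	blk_complete_request(req);
#endif
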
/*
 * queue lock must be held
 */
void end_that_request_last(struct request *req, int uptodate)
{
	struct gendisk *disk = req->rq_disk;
	int error;

	/*
	 * extend the uptodate bool to allow < 0 values to signal a
	 * direct I/O error
	 */
	error = 0;
	if (end_io_error(uptodate))
		error = !uptodate ? -EIO : uptodate;

	if (unlikely(laptop_mode) && blk_fs_request(req))
		laptop_io_completion();

	/*
	 * Account IO completion. bar_rq isn't accounted as a normal
	 * IO on either queueing or completion. Accounting the containing
	 * request is enough.
	 */
	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
		unsigned long duration = jiffies - req->start_time;
		const int rw = rq_data_dir(req);

		__disk_stat_inc(disk, ios[rw]);
		__disk_stat_add(disk, ticks[rw], duration);
		disk_round_stats(disk);
		disk->in_flight--;
	}
	if (req->end_io)
		req->end_io(req, error);
	else
		__blk_put_request(req->q, req);
}
EXPORT_SYMBOL(end_that_request_last);

void end_request(struct request *req, int uptodate)
{
	if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
		add_disk_randomness(req->rq_disk);
		blkdev_dequeue_request(req);
		end_that_request_last(req, uptodate);
	}
}
EXPORT_SYMBOL(end_request);

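/*
 * Illustrative sketch (added for exposition): end_request() in its
 * natural habitat, a simple driver's request function that completes one
 * segment at a time. my_request_fn and my_transfer are hypothetical; the
 * queue lock is already held when a request_fn runs.
 */
#if 0
static void my_request_fn(request_queue_t *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(req)) {
			end_request(req, 0);	/* not a fs request: fail it */
			continue;
		}
		/* transfer current_nr_sectors starting at req->sector */
		my_transfer(req->sector, req->current_nr_sectors, req->buffer);
		end_request(req, 1);		/* advances or retires req */
	}
}
#endif
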
void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
{
	/* first two bits are identical in rq->flags and bio->bi_rw */
	rq->flags |= (bio->bi_rw & 3);

	rq->nr_phys_segments = bio_phys_segments(q, bio);
	rq->nr_hw_segments = bio_hw_segments(q, bio);
	rq->current_nr_sectors = bio_cur_sectors(bio);
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
	rq->buffer = bio_data(bio);

	rq->bio = rq->biotail = bio;
}
EXPORT_SYMBOL(blk_rq_bio_prep);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

void kblockd_flush(void)
{
	flush_workqueue(kblockd_workqueue);
}
EXPORT_SYMBOL(kblockd_flush);

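/*
 * Illustrative sketch (added for exposition): deferring block-layer
 * housekeeping to the kblockd workqueue. my_unplug_work is a hypothetical
 * handler; this assumes the three-argument work_struct API of this kernel
 * generation, where the handler takes a void *data cookie.
 */
#if 0
static void my_unplug_work(void *data)
{
	request_queue_t *q = data;

	q->unplug_fn(q);	/* e.g. kick the queue outside IRQ context */
}

/* at init time: */
	INIT_WORK(&q->unplug_work, my_unplug_work, q);

/* later, from a context that must not block: */
	kblockd_schedule_work(&q->unplug_work);
#endif
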
int __init blk_dev_init(void)
{
	int i;

	kblockd_workqueue = create_workqueue("kblockd");
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	request_cachep = kmem_cache_create("blkdev_requests",
			sizeof(struct request), 0, SLAB_PANIC, NULL, NULL);

	requestq_cachep = kmem_cache_create("blkdev_queue",
			sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL);

	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
	register_hotcpu_notifier(&blk_cpu_notifier);

	blk_max_low_pfn = max_low_pfn;
	blk_max_pfn = max_pfn;

	return 0;
}

/*
 * IO Context helper functions
 */
void put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return;

	BUG_ON(atomic_read(&ioc->refcount) == 0);

	if (atomic_dec_and_test(&ioc->refcount)) {
		struct cfq_io_context *cic;

		rcu_read_lock();
		if (ioc->aic && ioc->aic->dtor)
			ioc->aic->dtor(ioc->aic);
		if (ioc->cic_root.rb_node != NULL) {
			struct rb_node *n = rb_first(&ioc->cic_root);

			cic = rb_entry(n, struct cfq_io_context, rb_node);
			cic->dtor(ioc);
		}
		rcu_read_unlock();

		kmem_cache_free(iocontext_cachep, ioc);
	}
}
EXPORT_SYMBOL(put_io_context);

/* Called by the exiting task */
void exit_io_context(void)
{
	unsigned long flags;
	struct io_context *ioc;
	struct cfq_io_context *cic;

	local_irq_save(flags);
	task_lock(current);
	ioc = current->io_context;
	current->io_context = NULL;
	ioc->task = NULL;
	task_unlock(current);
	local_irq_restore(flags);

	if (ioc->aic && ioc->aic->exit)
		ioc->aic->exit(ioc->aic);
	if (ioc->cic_root.rb_node != NULL) {
		cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
		cic->exit(ioc);
	}

	put_io_context(ioc);
}

/*
 * If the current task has no IO context then create one and initialise it.
 * Otherwise, return its existing IO context.
 *
 * This returned IO context doesn't have a specifically elevated refcount,
 * but since the current task itself holds a reference, the context can be
 * used in general code, so long as it stays within `current` context.
 */
struct io_context *current_io_context(gfp_t gfp_flags)
{
	struct task_struct *tsk = current;
	struct io_context *ret;

	ret = tsk->io_context;
	if (likely(ret))
		return ret;

	ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
	if (ret) {
		atomic_set(&ret->refcount, 1);
		ret->task = current;
		ret->set_ioprio = NULL;
		ret->last_waited = jiffies; /* doesn't matter... */
		ret->nr_batch_requests = 0; /* because this is 0 */
		ret->aic = NULL;
		ret->cic_root.rb_node = NULL;
		/* make sure set_task_ioprio() sees the settings above */
		smp_wmb();
		tsk->io_context = ret;
	}

	return ret;
}
EXPORT_SYMBOL(current_io_context);

/*
 * If the current task has no IO context then create one and initialise it.
 * If it does have a context, take a ref on it.
 *
 * This is always called in the context of the task which submitted the I/O.
 */
struct io_context *get_io_context(gfp_t gfp_flags)
{
	struct io_context *ret;

	ret = current_io_context(gfp_flags);
	if (likely(ret))
		atomic_inc(&ret->refcount);

	return ret;
}
EXPORT_SYMBOL(get_io_context);

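/*
 * Illustrative sketch (added for exposition): the get/put discipline for
 * an io_context whose reference must outlive the submitting task, e.g.
 * when stashed in a driver-private structure (my_stash is hypothetical).
 */
#if 0
	struct io_context *ioc = get_io_context(GFP_KERNEL);	/* +1 ref */

	if (ioc) {
		my_stash->ioc = ioc;	/* may be used after current exits */
		/* ... */
		put_io_context(my_stash->ioc);	/* -1 ref when done */
	}
#endif
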
void copy_io_context(struct io_context **pdst, struct io_context **psrc)
{
	struct io_context *src = *psrc;
	struct io_context *dst = *pdst;

	if (src) {
		BUG_ON(atomic_read(&src->refcount) == 0);
		atomic_inc(&src->refcount);
		put_io_context(dst);
		*pdst = src;
	}
}
EXPORT_SYMBOL(copy_io_context);

void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
{
	struct io_context *temp;

	temp = *ioc1;
	*ioc1 = *ioc2;
	*ioc2 = temp;
}
EXPORT_SYMBOL(swap_io_context);

/*
 * sysfs parts below
 */
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl = &q->rq;
	unsigned long nr;
	int ret = queue_var_store(&nr, page, count);
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	if (rl->count[READ] >= queue_congestion_on_threshold(q))
		set_queue_congested(q, READ);
	else if (rl->count[READ] < queue_congestion_off_threshold(q))
		clear_queue_congested(q, READ);

	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
		set_queue_congested(q, WRITE);
	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
		clear_queue_congested(q, WRITE);

	if (rl->count[READ] >= q->nr_requests) {
		blk_set_queue_full(q, READ);
	} else if (rl->count[READ]+1 <= q->nr_requests) {
		blk_clear_queue_full(q, READ);
		wake_up(&rl->wait[READ]);
	}

	if (rl->count[WRITE] >= q->nr_requests) {
		blk_set_queue_full(q, WRITE);
	} else if (rl->count[WRITE]+1 <= q->nr_requests) {
		blk_clear_queue_full(q, WRITE);
		wake_up(&rl->wait[WRITE]);
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	spin_lock_irq(q->queue_lock);
	if (ra_kb > (q->max_sectors >> 1))
		ra_kb = (q->max_sectors >> 1);

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = q->max_sectors >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
			max_hw_sectors_kb = q->max_hw_sectors >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
	int ra_kb;

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;
	/*
	 * Take the queue lock to update the readahead and max_sectors
	 * values synchronously:
	 */
	spin_lock_irq(q->queue_lock);
	/*
	 * Trim readahead window as well, if necessary:
	 */
	ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
	if (ra_kb > max_sectors_kb)
		q->backing_dev_info.ra_pages =
				max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);

	q->max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = q->max_hw_sectors >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_iosched_entry.attr,
	NULL,
};

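/*
 * Note (added for exposition): once blk_register_queue() below has run,
 * these attributes surface under /sys/block/<disk>/queue/, e.g.
 * /sys/block/sda/queue/nr_requests or /sys/block/sda/queue/scheduler
 * ("sda" is just an example device). Reads go through the ->show hooks
 * and writes through ->store, serialized by q->sysfs_lock in the
 * queue_attr_show()/queue_attr_store() wrappers that follow.
 */
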
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->store)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static struct kobj_type queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

int blk_register_queue(struct gendisk *disk)
{
	int ret;

	request_queue_t *q = disk->queue;

	if (!q || !q->request_fn)
		return -ENXIO;

	q->kobj.parent = kobject_get(&disk->kobj);

	ret = kobject_add(&q->kobj);
	if (ret < 0)
		return ret;

	kobject_uevent(&q->kobj, KOBJ_ADD);

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		return ret;
	}

	return 0;
}

void blk_unregister_queue(struct gendisk *disk)
{
	request_queue_t *q = disk->queue;

	if (q && q->request_fn) {
		elv_unregister_queue(q);

		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		kobject_put(&disk->kobj);
	}
}