raid5.c

  1. /*
  2. * raid5.c : Multiple Devices driver for Linux
  3. * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
  4. * Copyright (C) 1999, 2000 Ingo Molnar
  5. * Copyright (C) 2002, 2003 H. Peter Anvin
  6. *
  7. * RAID-4/5/6 management functions.
  8. * Thanks to Penguin Computing for making the RAID-6 development possible
  9. * by donating a test server!
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License as published by
  13. * the Free Software Foundation; either version 2, or (at your option)
  14. * any later version.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * (for example /usr/src/linux/COPYING); if not, write to the Free
  18. * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  19. */
  20. /*
  21. * BITMAP UNPLUGGING:
  22. *
  23. * The sequencing for updating the bitmap reliably is a little
  24. * subtle (and I got it wrong the first time) so it deserves some
  25. * explanation.
  26. *
  27. * We group bitmap updates into batches. Each batch has a number.
  28. * We may write out several batches at once, but that isn't very important.
  29. * conf->bm_write is the number of the last batch successfully written.
  30. * conf->bm_flush is the number of the last batch that was closed to
  31. * new additions.
  32. * When we discover that we will need to write to any block in a stripe
  33. * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
  34. * the number of the batch it will be in. This is bm_flush+1.
  35. * When we are ready to do a write, if that batch hasn't been written yet,
  36. * we plug the array and queue the stripe for later.
  37. * When an unplug happens, we increment bm_flush, thus closing the current
  38. * batch.
  39. * When we notice that bm_flush > bm_write, we write out all pending updates
  40. * to the bitmap, and advance bm_write to where bm_flush was.
  41. * This may occasionally write a bit out twice, but is sure never to
  42. * miss any bits.
  43. */
  44. #include <linux/blkdev.h>
  45. #include <linux/kthread.h>
  46. #include <linux/async_tx.h>
  47. #include <linux/seq_file.h>
  48. #include "md.h"
  49. #include "raid5.h"
  50. #include "raid6.h"
  51. #include "bitmap.h"
  52. /*
  53. * Stripe cache
  54. */
  55. #define NR_STRIPES 256
  56. #define STRIPE_SIZE PAGE_SIZE
  57. #define STRIPE_SHIFT (PAGE_SHIFT - 9)
  58. #define STRIPE_SECTORS (STRIPE_SIZE>>9)
  59. #define IO_THRESHOLD 1
  60. #define BYPASS_THRESHOLD 1
  61. #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
  62. #define HASH_MASK (NR_HASH - 1)
  63. #define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
  64. /* bio's attached to a stripe+device for I/O are linked together in bi_sector
  65. * order without overlap. There may be several bio's per stripe+device, and
  66. * a bio could span several devices.
  67. * When walking this list for a particular stripe+device, we must never proceed
  68. * beyond a bio that extends past this device, as the next bio might no longer
  69. * be valid.
  70. * This macro is used to determine the 'next' bio in the list, given the sector
  71. * of the current stripe+device
  72. */
  73. #define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
  74. /*
  75. * The following can be used to debug the driver
  76. */
  77. #define RAID5_PARANOIA 1
  78. #if RAID5_PARANOIA && defined(CONFIG_SMP)
  79. # define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
  80. #else
  81. # define CHECK_DEVLOCK()
  82. #endif
  83. #ifdef DEBUG
  84. #define inline
  85. #define __inline__
  86. #endif
  87. #define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
  88. #if !RAID6_USE_EMPTY_ZERO_PAGE
  89. /* In .bss so it's zeroed */
  90. const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
  91. #endif
  92. /*
  93. * We maintain a biased count of active stripes in the bottom 16 bits of
  94. * bi_phys_segments, and a count of processed stripes in the upper 16 bits
  95. */
  96. static inline int raid5_bi_phys_segments(struct bio *bio)
  97. {
  98. return bio->bi_phys_segments & 0xffff;
  99. }
  100. static inline int raid5_bi_hw_segments(struct bio *bio)
  101. {
  102. return (bio->bi_phys_segments >> 16) & 0xffff;
  103. }
  104. static inline int raid5_dec_bi_phys_segments(struct bio *bio)
  105. {
  106. --bio->bi_phys_segments;
  107. return raid5_bi_phys_segments(bio);
  108. }
  109. static inline int raid5_dec_bi_hw_segments(struct bio *bio)
  110. {
  111. unsigned short val = raid5_bi_hw_segments(bio);
  112. --val;
  113. bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
  114. return val;
  115. }
  116. static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
  117. {
  118. bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
  119. }
  120. /* Find first data disk in a raid6 stripe */
  121. static inline int raid6_d0(struct stripe_head *sh)
  122. {
  123. if (sh->ddf_layout)
  124. /* ddf always starts from the first device */
  125. return 0;
  126. /* md starts just after Q block */
  127. if (sh->qd_idx == sh->disks - 1)
  128. return 0;
  129. else
  130. return sh->qd_idx + 1;
  131. }
  132. static inline int raid6_next_disk(int disk, int raid_disks)
  133. {
  134. disk++;
  135. return (disk < raid_disks) ? disk : 0;
  136. }
  137. /* When walking through the disks in a raid6, starting at raid6_d0,
  138. * we need to map each disk to a 'slot', where the data disks are slot
  139. * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
  140. * is raid_disks-1. This helper does that mapping.
  141. */
  142. static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
  143. int *count, int syndrome_disks)
  144. {
  145. int slot;
  146. if (idx == sh->pd_idx)
  147. return syndrome_disks;
  148. if (idx == sh->qd_idx)
  149. return syndrome_disks + 1;
  150. slot = (*count)++;
  151. return slot;
  152. }
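/*
 * Complete every bio on the 'return_bi' chain: each bio is unlinked from
 * the chain and ended with a success status via bio_endio().
 */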
  153. static void return_io(struct bio *return_bi)
  154. {
  155. struct bio *bi = return_bi;
  156. while (bi) {
  157. return_bi = bi->bi_next;
  158. bi->bi_next = NULL;
  159. bi->bi_size = 0;
  160. bio_endio(bi, 0);
  161. bi = return_bi;
  162. }
  163. }
  164. static void print_raid5_conf (raid5_conf_t *conf);
  165. static int stripe_operations_active(struct stripe_head *sh)
  166. {
  167. return sh->check_state || sh->reconstruct_state ||
  168. test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
  169. test_bit(STRIPE_COMPUTE_RUN, &sh->state);
  170. }
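/*
 * Drop one reference on a stripe. When the count reaches zero the stripe
 * is either queued for further handling (delayed, bitmap or handle lists)
 * or returned to the inactive list, waking any waiters. The caller must
 * hold conf->device_lock; release_stripe() below is the locking wrapper.
 */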
  171. static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
  172. {
  173. if (atomic_dec_and_test(&sh->count)) {
  174. BUG_ON(!list_empty(&sh->lru));
  175. BUG_ON(atomic_read(&conf->active_stripes)==0);
  176. if (test_bit(STRIPE_HANDLE, &sh->state)) {
  177. if (test_bit(STRIPE_DELAYED, &sh->state)) {
  178. list_add_tail(&sh->lru, &conf->delayed_list);
  179. blk_plug_device(conf->mddev->queue);
  180. } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
  181. sh->bm_seq - conf->seq_write > 0) {
  182. list_add_tail(&sh->lru, &conf->bitmap_list);
  183. blk_plug_device(conf->mddev->queue);
  184. } else {
  185. clear_bit(STRIPE_BIT_DELAY, &sh->state);
  186. list_add_tail(&sh->lru, &conf->handle_list);
  187. }
  188. md_wakeup_thread(conf->mddev->thread);
  189. } else {
  190. BUG_ON(stripe_operations_active(sh));
  191. if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
  192. atomic_dec(&conf->preread_active_stripes);
  193. if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
  194. md_wakeup_thread(conf->mddev->thread);
  195. }
  196. atomic_dec(&conf->active_stripes);
  197. if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
  198. list_add_tail(&sh->lru, &conf->inactive_list);
  199. wake_up(&conf->wait_for_stripe);
  200. if (conf->retry_read_aligned)
  201. md_wakeup_thread(conf->mddev->thread);
  202. }
  203. }
  204. }
  205. }
  206. static void release_stripe(struct stripe_head *sh)
  207. {
  208. raid5_conf_t *conf = sh->raid_conf;
  209. unsigned long flags;
  210. spin_lock_irqsave(&conf->device_lock, flags);
  211. __release_stripe(conf, sh);
  212. spin_unlock_irqrestore(&conf->device_lock, flags);
  213. }
  214. static inline void remove_hash(struct stripe_head *sh)
  215. {
  216. pr_debug("remove_hash(), stripe %llu\n",
  217. (unsigned long long)sh->sector);
  218. hlist_del_init(&sh->hash);
  219. }
  220. static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
  221. {
  222. struct hlist_head *hp = stripe_hash(conf, sh->sector);
  223. pr_debug("insert_hash(), stripe %llu\n",
  224. (unsigned long long)sh->sector);
  225. CHECK_DEVLOCK();
  226. hlist_add_head(&sh->hash, hp);
  227. }
  228. /* find an idle stripe, make sure it is unhashed, and return it. */
  229. static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
  230. {
  231. struct stripe_head *sh = NULL;
  232. struct list_head *first;
  233. CHECK_DEVLOCK();
  234. if (list_empty(&conf->inactive_list))
  235. goto out;
  236. first = conf->inactive_list.next;
  237. sh = list_entry(first, struct stripe_head, lru);
  238. list_del_init(first);
  239. remove_hash(sh);
  240. atomic_inc(&conf->active_stripes);
  241. out:
  242. return sh;
  243. }
  244. static void shrink_buffers(struct stripe_head *sh, int num)
  245. {
  246. struct page *p;
  247. int i;
  248. for (i=0; i<num ; i++) {
  249. p = sh->dev[i].page;
  250. if (!p)
  251. continue;
  252. sh->dev[i].page = NULL;
  253. put_page(p);
  254. }
  255. }
  256. static int grow_buffers(struct stripe_head *sh, int num)
  257. {
  258. int i;
  259. for (i=0; i<num; i++) {
  260. struct page *page;
  261. if (!(page = alloc_page(GFP_KERNEL))) {
  262. return 1;
  263. }
  264. sh->dev[i].page = page;
  265. }
  266. return 0;
  267. }
  268. static void raid5_build_block(struct stripe_head *sh, int i);
  269. static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
  270. struct stripe_head *sh);
  271. static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
  272. {
  273. raid5_conf_t *conf = sh->raid_conf;
  274. int i;
  275. BUG_ON(atomic_read(&sh->count) != 0);
  276. BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
  277. BUG_ON(stripe_operations_active(sh));
  278. CHECK_DEVLOCK();
  279. pr_debug("init_stripe called, stripe %llu\n",
  280. (unsigned long long)sh->sector);
  281. remove_hash(sh);
  282. sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
  283. sh->sector = sector;
  284. stripe_set_idx(sector, conf, previous, sh);
  285. sh->state = 0;
  286. for (i = sh->disks; i--; ) {
  287. struct r5dev *dev = &sh->dev[i];
  288. if (dev->toread || dev->read || dev->towrite || dev->written ||
  289. test_bit(R5_LOCKED, &dev->flags)) {
  290. printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
  291. (unsigned long long)sh->sector, i, dev->toread,
  292. dev->read, dev->towrite, dev->written,
  293. test_bit(R5_LOCKED, &dev->flags));
  294. BUG();
  295. }
  296. dev->flags = 0;
  297. raid5_build_block(sh, i);
  298. }
  299. insert_hash(conf, sh);
  300. }
  301. static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
  302. {
  303. struct stripe_head *sh;
  304. struct hlist_node *hn;
  305. CHECK_DEVLOCK();
  306. pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
  307. hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
  308. if (sh->sector == sector && sh->disks == disks)
  309. return sh;
  310. pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
  311. return NULL;
  312. }
  313. static void unplug_slaves(mddev_t *mddev);
  314. static void raid5_unplug_device(struct request_queue *q);
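/*
 * Look up the stripe covering 'sector' in the hash table, or take a free
 * stripe and initialise it for that sector. Unless 'noblock' is set this
 * may sleep waiting for quiesce to finish or for an inactive stripe to
 * become available. The returned stripe has its reference count raised.
 */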
  315. static struct stripe_head *
  316. get_active_stripe(raid5_conf_t *conf, sector_t sector,
  317. int previous, int noblock)
  318. {
  319. struct stripe_head *sh;
  320. int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
  321. pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
  322. spin_lock_irq(&conf->device_lock);
  323. do {
  324. wait_event_lock_irq(conf->wait_for_stripe,
  325. conf->quiesce == 0,
  326. conf->device_lock, /* nothing */);
  327. sh = __find_stripe(conf, sector, disks);
  328. if (!sh) {
  329. if (!conf->inactive_blocked)
  330. sh = get_free_stripe(conf);
  331. if (noblock && sh == NULL)
  332. break;
  333. if (!sh) {
  334. conf->inactive_blocked = 1;
  335. wait_event_lock_irq(conf->wait_for_stripe,
  336. !list_empty(&conf->inactive_list) &&
  337. (atomic_read(&conf->active_stripes)
  338. < (conf->max_nr_stripes *3/4)
  339. || !conf->inactive_blocked),
  340. conf->device_lock,
  341. raid5_unplug_device(conf->mddev->queue)
  342. );
  343. conf->inactive_blocked = 0;
  344. } else
  345. init_stripe(sh, sector, previous);
  346. } else {
  347. if (atomic_read(&sh->count)) {
  348. BUG_ON(!list_empty(&sh->lru));
  349. } else {
  350. if (!test_bit(STRIPE_HANDLE, &sh->state))
  351. atomic_inc(&conf->active_stripes);
  352. if (list_empty(&sh->lru) &&
  353. !test_bit(STRIPE_EXPANDING, &sh->state))
  354. BUG();
  355. list_del_init(&sh->lru);
  356. }
  357. }
  358. } while (sh == NULL);
  359. if (sh)
  360. atomic_inc(&sh->count);
  361. spin_unlock_irq(&conf->device_lock);
  362. return sh;
  363. }
  364. static void
  365. raid5_end_read_request(struct bio *bi, int error);
  366. static void
  367. raid5_end_write_request(struct bio *bi, int error);
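/*
 * Issue the I/O that the stripe state machine has requested: for every
 * device flagged R5_Wantread or R5_Wantwrite, build the embedded per-device
 * bio and submit it with generic_make_request(). If the device has no
 * usable rdev, a write marks the stripe degraded and the block is handed
 * back to the stripe handler.
 */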
  368. static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
  369. {
  370. raid5_conf_t *conf = sh->raid_conf;
  371. int i, disks = sh->disks;
  372. might_sleep();
  373. for (i = disks; i--; ) {
  374. int rw;
  375. struct bio *bi;
  376. mdk_rdev_t *rdev;
  377. if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
  378. rw = WRITE;
  379. else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
  380. rw = READ;
  381. else
  382. continue;
  383. bi = &sh->dev[i].req;
  384. bi->bi_rw = rw;
  385. if (rw == WRITE)
  386. bi->bi_end_io = raid5_end_write_request;
  387. else
  388. bi->bi_end_io = raid5_end_read_request;
  389. rcu_read_lock();
  390. rdev = rcu_dereference(conf->disks[i].rdev);
  391. if (rdev && test_bit(Faulty, &rdev->flags))
  392. rdev = NULL;
  393. if (rdev)
  394. atomic_inc(&rdev->nr_pending);
  395. rcu_read_unlock();
  396. if (rdev) {
  397. if (s->syncing || s->expanding || s->expanded)
  398. md_sync_acct(rdev->bdev, STRIPE_SECTORS);
  399. set_bit(STRIPE_IO_STARTED, &sh->state);
  400. bi->bi_bdev = rdev->bdev;
  401. pr_debug("%s: for %llu schedule op %ld on disc %d\n",
  402. __func__, (unsigned long long)sh->sector,
  403. bi->bi_rw, i);
  404. atomic_inc(&sh->count);
  405. bi->bi_sector = sh->sector + rdev->data_offset;
  406. bi->bi_flags = 1 << BIO_UPTODATE;
  407. bi->bi_vcnt = 1;
  408. bi->bi_max_vecs = 1;
  409. bi->bi_idx = 0;
  410. bi->bi_io_vec = &sh->dev[i].vec;
  411. bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
  412. bi->bi_io_vec[0].bv_offset = 0;
  413. bi->bi_size = STRIPE_SIZE;
  414. bi->bi_next = NULL;
  415. if (rw == WRITE &&
  416. test_bit(R5_ReWrite, &sh->dev[i].flags))
  417. atomic_add(STRIPE_SECTORS,
  418. &rdev->corrected_errors);
  419. generic_make_request(bi);
  420. } else {
  421. if (rw == WRITE)
  422. set_bit(STRIPE_DEGRADED, &sh->state);
  423. pr_debug("skip op %ld on disc %d for sector %llu\n",
  424. bi->bi_rw, i, (unsigned long long)sh->sector);
  425. clear_bit(R5_LOCKED, &sh->dev[i].flags);
  426. set_bit(STRIPE_HANDLE, &sh->state);
  427. }
  428. }
  429. }
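/*
 * Copy data between a bio and a stripe cache page using the async_tx API,
 * chaining the copy onto 'tx'. 'frombio' selects the direction: non-zero
 * copies from the bio into the page, zero copies from the page to the bio.
 */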
  430. static struct dma_async_tx_descriptor *
  431. async_copy_data(int frombio, struct bio *bio, struct page *page,
  432. sector_t sector, struct dma_async_tx_descriptor *tx)
  433. {
  434. struct bio_vec *bvl;
  435. struct page *bio_page;
  436. int i;
  437. int page_offset;
  438. if (bio->bi_sector >= sector)
  439. page_offset = (signed)(bio->bi_sector - sector) * 512;
  440. else
  441. page_offset = (signed)(sector - bio->bi_sector) * -512;
  442. bio_for_each_segment(bvl, bio, i) {
  443. int len = bio_iovec_idx(bio, i)->bv_len;
  444. int clen;
  445. int b_offset = 0;
  446. if (page_offset < 0) {
  447. b_offset = -page_offset;
  448. page_offset += b_offset;
  449. len -= b_offset;
  450. }
  451. if (len > 0 && page_offset + len > STRIPE_SIZE)
  452. clen = STRIPE_SIZE - page_offset;
  453. else
  454. clen = len;
  455. if (clen > 0) {
  456. b_offset += bio_iovec_idx(bio, i)->bv_offset;
  457. bio_page = bio_iovec_idx(bio, i)->bv_page;
  458. if (frombio)
  459. tx = async_memcpy(page, bio_page, page_offset,
  460. b_offset, clen,
  461. ASYNC_TX_DEP_ACK,
  462. tx, NULL, NULL);
  463. else
  464. tx = async_memcpy(bio_page, page, b_offset,
  465. page_offset, clen,
  466. ASYNC_TX_DEP_ACK,
  467. tx, NULL, NULL);
  468. }
  469. if (clen < len) /* hit end of page */
  470. break;
  471. page_offset += len;
  472. }
  473. return tx;
  474. }
  475. static void ops_complete_biofill(void *stripe_head_ref)
  476. {
  477. struct stripe_head *sh = stripe_head_ref;
  478. struct bio *return_bi = NULL;
  479. raid5_conf_t *conf = sh->raid_conf;
  480. int i;
  481. pr_debug("%s: stripe %llu\n", __func__,
  482. (unsigned long long)sh->sector);
  483. /* clear completed biofills */
  484. spin_lock_irq(&conf->device_lock);
  485. for (i = sh->disks; i--; ) {
  486. struct r5dev *dev = &sh->dev[i];
  487. /* acknowledge completion of a biofill operation */
  488. /* and check if we need to reply to a read request,
  489. * new R5_Wantfill requests are held off until
  490. * !STRIPE_BIOFILL_RUN
  491. */
  492. if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
  493. struct bio *rbi, *rbi2;
  494. BUG_ON(!dev->read);
  495. rbi = dev->read;
  496. dev->read = NULL;
  497. while (rbi && rbi->bi_sector <
  498. dev->sector + STRIPE_SECTORS) {
  499. rbi2 = r5_next_bio(rbi, dev->sector);
  500. if (!raid5_dec_bi_phys_segments(rbi)) {
  501. rbi->bi_next = return_bi;
  502. return_bi = rbi;
  503. }
  504. rbi = rbi2;
  505. }
  506. }
  507. }
  508. spin_unlock_irq(&conf->device_lock);
  509. clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
  510. return_io(return_bi);
  511. set_bit(STRIPE_HANDLE, &sh->state);
  512. release_stripe(sh);
  513. }
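/*
 * Satisfy queued reads: for each device flagged R5_Wantfill copy data from
 * the stripe cache page out to the bios waiting on dev->toread, then
 * schedule ops_complete_biofill() to end those bios.
 */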
  514. static void ops_run_biofill(struct stripe_head *sh)
  515. {
  516. struct dma_async_tx_descriptor *tx = NULL;
  517. raid5_conf_t *conf = sh->raid_conf;
  518. int i;
  519. pr_debug("%s: stripe %llu\n", __func__,
  520. (unsigned long long)sh->sector);
  521. for (i = sh->disks; i--; ) {
  522. struct r5dev *dev = &sh->dev[i];
  523. if (test_bit(R5_Wantfill, &dev->flags)) {
  524. struct bio *rbi;
  525. spin_lock_irq(&conf->device_lock);
  526. dev->read = rbi = dev->toread;
  527. dev->toread = NULL;
  528. spin_unlock_irq(&conf->device_lock);
  529. while (rbi && rbi->bi_sector <
  530. dev->sector + STRIPE_SECTORS) {
  531. tx = async_copy_data(0, rbi, dev->page,
  532. dev->sector, tx);
  533. rbi = r5_next_bio(rbi, dev->sector);
  534. }
  535. }
  536. }
  537. atomic_inc(&sh->count);
  538. async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
  539. ops_complete_biofill, sh);
  540. }
  541. static void ops_complete_compute5(void *stripe_head_ref)
  542. {
  543. struct stripe_head *sh = stripe_head_ref;
  544. int target = sh->ops.target;
  545. struct r5dev *tgt = &sh->dev[target];
  546. pr_debug("%s: stripe %llu\n", __func__,
  547. (unsigned long long)sh->sector);
  548. set_bit(R5_UPTODATE, &tgt->flags);
  549. BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
  550. clear_bit(R5_Wantcompute, &tgt->flags);
  551. clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
  552. if (sh->check_state == check_state_compute_run)
  553. sh->check_state = check_state_compute_result;
  554. set_bit(STRIPE_HANDLE, &sh->state);
  555. release_stripe(sh);
  556. }
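/*
 * Recompute the single block named by sh->ops.target by XOR-ing the pages
 * of every other device in the stripe into it (standard RAID-5 single
 * block reconstruction).
 */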
  557. static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)
  558. {
  559. /* kernel stack size limits the total number of disks */
  560. int disks = sh->disks;
  561. struct page *xor_srcs[disks];
  562. int target = sh->ops.target;
  563. struct r5dev *tgt = &sh->dev[target];
  564. struct page *xor_dest = tgt->page;
  565. int count = 0;
  566. struct dma_async_tx_descriptor *tx;
  567. int i;
  568. pr_debug("%s: stripe %llu block: %d\n",
  569. __func__, (unsigned long long)sh->sector, target);
  570. BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
  571. for (i = disks; i--; )
  572. if (i != target)
  573. xor_srcs[count++] = sh->dev[i].page;
  574. atomic_inc(&sh->count);
  575. if (unlikely(count == 1))
  576. tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
  577. 0, NULL, ops_complete_compute5, sh);
  578. else
  579. tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
  580. ASYNC_TX_XOR_ZERO_DST, NULL,
  581. ops_complete_compute5, sh);
  582. return tx;
  583. }
  584. static void ops_complete_prexor(void *stripe_head_ref)
  585. {
  586. struct stripe_head *sh = stripe_head_ref;
  587. pr_debug("%s: stripe %llu\n", __func__,
  588. (unsigned long long)sh->sector);
  589. }
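/*
 * First half of a read-modify-write: XOR the blocks that are about to be
 * overwritten (R5_Wantdrain) into the existing parity block, effectively
 * subtracting the old data from the parity.
 */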
  590. static struct dma_async_tx_descriptor *
  591. ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
  592. {
  593. /* kernel stack size limits the total number of disks */
  594. int disks = sh->disks;
  595. struct page *xor_srcs[disks];
  596. int count = 0, pd_idx = sh->pd_idx, i;
  597. /* existing parity data subtracted */
  598. struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
  599. pr_debug("%s: stripe %llu\n", __func__,
  600. (unsigned long long)sh->sector);
  601. for (i = disks; i--; ) {
  602. struct r5dev *dev = &sh->dev[i];
  603. /* Only process blocks that are known to be uptodate */
  604. if (test_bit(R5_Wantdrain, &dev->flags))
  605. xor_srcs[count++] = dev->page;
  606. }
  607. tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
  608. ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx,
  609. ops_complete_prexor, sh);
  610. return tx;
  611. }
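/*
 * Drain queued write bios into the stripe cache: move dev->towrite to
 * dev->written and copy the bio data into the corresponding cache pages.
 */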
  612. static struct dma_async_tx_descriptor *
  613. ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
  614. {
  615. int disks = sh->disks;
  616. int i;
  617. pr_debug("%s: stripe %llu\n", __func__,
  618. (unsigned long long)sh->sector);
  619. for (i = disks; i--; ) {
  620. struct r5dev *dev = &sh->dev[i];
  621. struct bio *chosen;
  622. if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
  623. struct bio *wbi;
  624. spin_lock(&sh->lock);
  625. chosen = dev->towrite;
  626. dev->towrite = NULL;
  627. BUG_ON(dev->written);
  628. wbi = dev->written = chosen;
  629. spin_unlock(&sh->lock);
  630. while (wbi && wbi->bi_sector <
  631. dev->sector + STRIPE_SECTORS) {
  632. tx = async_copy_data(1, wbi, dev->page,
  633. dev->sector, tx);
  634. wbi = r5_next_bio(wbi, dev->sector);
  635. }
  636. }
  637. }
  638. return tx;
  639. }
  640. static void ops_complete_postxor(void *stripe_head_ref)
  641. {
  642. struct stripe_head *sh = stripe_head_ref;
  643. int disks = sh->disks, i, pd_idx = sh->pd_idx;
  644. pr_debug("%s: stripe %llu\n", __func__,
  645. (unsigned long long)sh->sector);
  646. for (i = disks; i--; ) {
  647. struct r5dev *dev = &sh->dev[i];
  648. if (dev->written || i == pd_idx)
  649. set_bit(R5_UPTODATE, &dev->flags);
  650. }
  651. if (sh->reconstruct_state == reconstruct_state_drain_run)
  652. sh->reconstruct_state = reconstruct_state_drain_result;
  653. else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
  654. sh->reconstruct_state = reconstruct_state_prexor_drain_result;
  655. else {
  656. BUG_ON(sh->reconstruct_state != reconstruct_state_run);
  657. sh->reconstruct_state = reconstruct_state_result;
  658. }
  659. set_bit(STRIPE_HANDLE, &sh->state);
  660. release_stripe(sh);
  661. }
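/*
 * Generate the new parity block: either fold the drained write data into
 * the prexor'd parity (read-modify-write) or XOR all data blocks together
 * (reconstruct write), then schedule ops_complete_postxor().
 */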
  662. static void
  663. ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
  664. {
  665. /* kernel stack size limits the total number of disks */
  666. int disks = sh->disks;
  667. struct page *xor_srcs[disks];
  668. int count = 0, pd_idx = sh->pd_idx, i;
  669. struct page *xor_dest;
  670. int prexor = 0;
  671. unsigned long flags;
  672. pr_debug("%s: stripe %llu\n", __func__,
  673. (unsigned long long)sh->sector);
  674. /* check if prexor is active which means only process blocks
  675. * that are part of a read-modify-write (written)
  676. */
  677. if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
  678. prexor = 1;
  679. xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
  680. for (i = disks; i--; ) {
  681. struct r5dev *dev = &sh->dev[i];
  682. if (dev->written)
  683. xor_srcs[count++] = dev->page;
  684. }
  685. } else {
  686. xor_dest = sh->dev[pd_idx].page;
  687. for (i = disks; i--; ) {
  688. struct r5dev *dev = &sh->dev[i];
  689. if (i != pd_idx)
  690. xor_srcs[count++] = dev->page;
  691. }
  692. }
  693. /* 1/ if we prexor'd then the dest is reused as a source
  694. * 2/ if we did not prexor then we are redoing the parity
  695. * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
  696. * for the synchronous xor case
  697. */
  698. flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK |
  699. (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
  700. atomic_inc(&sh->count);
  701. if (unlikely(count == 1)) {
  702. flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
  703. tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
  704. flags, tx, ops_complete_postxor, sh);
  705. } else
  706. tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
  707. flags, tx, ops_complete_postxor, sh);
  708. }
  709. static void ops_complete_check(void *stripe_head_ref)
  710. {
  711. struct stripe_head *sh = stripe_head_ref;
  712. pr_debug("%s: stripe %llu\n", __func__,
  713. (unsigned long long)sh->sector);
  714. sh->check_state = check_state_check_result;
  715. set_bit(STRIPE_HANDLE, &sh->state);
  716. release_stripe(sh);
  717. }
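/*
 * Verify parity: XOR-sum the parity block together with all data blocks;
 * a non-zero sh->ops.zero_sum_result indicates a parity mismatch.
 */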
  718. static void ops_run_check(struct stripe_head *sh)
  719. {
  720. /* kernel stack size limits the total number of disks */
  721. int disks = sh->disks;
  722. struct page *xor_srcs[disks];
  723. struct dma_async_tx_descriptor *tx;
  724. int count = 0, pd_idx = sh->pd_idx, i;
  725. struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
  726. pr_debug("%s: stripe %llu\n", __func__,
  727. (unsigned long long)sh->sector);
  728. for (i = disks; i--; ) {
  729. struct r5dev *dev = &sh->dev[i];
  730. if (i != pd_idx)
  731. xor_srcs[count++] = dev->page;
  732. }
  733. tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
  734. &sh->ops.zero_sum_result, 0, NULL, NULL, NULL);
  735. atomic_inc(&sh->count);
  736. tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
  737. ops_complete_check, sh);
  738. }
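/*
 * Dispatch the asynchronous operations requested in 'ops_request' for this
 * stripe, chaining them through the async_tx API, and finally wake anyone
 * waiting on an overlapping bio to be processed.
 */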
  739. static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
  740. {
  741. int overlap_clear = 0, i, disks = sh->disks;
  742. struct dma_async_tx_descriptor *tx = NULL;
  743. if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
  744. ops_run_biofill(sh);
  745. overlap_clear++;
  746. }
  747. if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
  748. tx = ops_run_compute5(sh);
  749. /* terminate the chain if postxor is not set to be run */
  750. if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request))
  751. async_tx_ack(tx);
  752. }
  753. if (test_bit(STRIPE_OP_PREXOR, &ops_request))
  754. tx = ops_run_prexor(sh, tx);
  755. if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
  756. tx = ops_run_biodrain(sh, tx);
  757. overlap_clear++;
  758. }
  759. if (test_bit(STRIPE_OP_POSTXOR, &ops_request))
  760. ops_run_postxor(sh, tx);
  761. if (test_bit(STRIPE_OP_CHECK, &ops_request))
  762. ops_run_check(sh);
  763. if (overlap_clear)
  764. for (i = disks; i--; ) {
  765. struct r5dev *dev = &sh->dev[i];
  766. if (test_and_clear_bit(R5_Overlap, &dev->flags))
  767. wake_up(&sh->raid_conf->wait_for_overlap);
  768. }
  769. }
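/*
 * Allocate one stripe_head plus its per-device pages and place it on the
 * inactive list via release_stripe(). Returns 1 on success, 0 on failure.
 */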
  770. static int grow_one_stripe(raid5_conf_t *conf)
  771. {
  772. struct stripe_head *sh;
  773. sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
  774. if (!sh)
  775. return 0;
  776. memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
  777. sh->raid_conf = conf;
  778. spin_lock_init(&sh->lock);
  779. if (grow_buffers(sh, conf->raid_disks)) {
  780. shrink_buffers(sh, conf->raid_disks);
  781. kmem_cache_free(conf->slab_cache, sh);
  782. return 0;
  783. }
  784. sh->disks = conf->raid_disks;
  785. /* we just created an active stripe so... */
  786. atomic_set(&sh->count, 1);
  787. atomic_inc(&conf->active_stripes);
  788. INIT_LIST_HEAD(&sh->lru);
  789. release_stripe(sh);
  790. return 1;
  791. }
  792. static int grow_stripes(raid5_conf_t *conf, int num)
  793. {
  794. struct kmem_cache *sc;
  795. int devs = conf->raid_disks;
  796. sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev));
  797. sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev));
  798. conf->active_name = 0;
  799. sc = kmem_cache_create(conf->cache_name[conf->active_name],
  800. sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
  801. 0, 0, NULL);
  802. if (!sc)
  803. return 1;
  804. conf->slab_cache = sc;
  805. conf->pool_size = devs;
  806. while (num--)
  807. if (!grow_one_stripe(conf))
  808. return 1;
  809. return 0;
  810. }
  811. #ifdef CONFIG_MD_RAID5_RESHAPE
  812. static int resize_stripes(raid5_conf_t *conf, int newsize)
  813. {
  814. /* Make all the stripes able to hold 'newsize' devices.
  815. * New slots in each stripe get 'page' set to a new page.
  816. *
  817. * This happens in stages:
  818. * 1/ create a new kmem_cache and allocate the required number of
  819. * stripe_heads.
  820. * 2/ gather all the old stripe_heads and transfer the pages across
  821. * to the new stripe_heads. This will have the side effect of
  822. * freezing the array as once all stripe_heads have been collected,
  823. * no IO will be possible. Old stripe heads are freed once their
  824. * pages have been transferred over, and the old kmem_cache is
  825. * freed when all stripes are done.
  826. * 3/ reallocate conf->disks to be suitably bigger. If this fails,
  827. * we simply return a failure status - no need to clean anything up.
  828. * 4/ allocate new pages for the new slots in the new stripe_heads.
  829. * If this fails, we don't bother trying to shrink the
  830. * stripe_heads down again, we just leave them as they are.
  831. * As each stripe_head is processed the new one is released into
  832. * active service.
  833. *
  834. * Once step2 is started, we cannot afford to wait for a write,
  835. * so we use GFP_NOIO allocations.
  836. */
  837. struct stripe_head *osh, *nsh;
  838. LIST_HEAD(newstripes);
  839. struct disk_info *ndisks;
  840. int err;
  841. struct kmem_cache *sc;
  842. int i;
  843. if (newsize <= conf->pool_size)
  844. return 0; /* never bother to shrink */
  845. err = md_allow_write(conf->mddev);
  846. if (err)
  847. return err;
  848. /* Step 1 */
  849. sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
  850. sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
  851. 0, 0, NULL);
  852. if (!sc)
  853. return -ENOMEM;
  854. for (i = conf->max_nr_stripes; i; i--) {
  855. nsh = kmem_cache_alloc(sc, GFP_KERNEL);
  856. if (!nsh)
  857. break;
  858. memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));
  859. nsh->raid_conf = conf;
  860. spin_lock_init(&nsh->lock);
  861. list_add(&nsh->lru, &newstripes);
  862. }
  863. if (i) {
  864. /* didn't get enough, give up */
  865. while (!list_empty(&newstripes)) {
  866. nsh = list_entry(newstripes.next, struct stripe_head, lru);
  867. list_del(&nsh->lru);
  868. kmem_cache_free(sc, nsh);
  869. }
  870. kmem_cache_destroy(sc);
  871. return -ENOMEM;
  872. }
  873. /* Step 2 - Must use GFP_NOIO now.
  874. * OK, we have enough stripes, start collecting inactive
  875. * stripes and copying them over
  876. */
  877. list_for_each_entry(nsh, &newstripes, lru) {
  878. spin_lock_irq(&conf->device_lock);
  879. wait_event_lock_irq(conf->wait_for_stripe,
  880. !list_empty(&conf->inactive_list),
  881. conf->device_lock,
  882. unplug_slaves(conf->mddev)
  883. );
  884. osh = get_free_stripe(conf);
  885. spin_unlock_irq(&conf->device_lock);
  886. atomic_set(&nsh->count, 1);
  887. for(i=0; i<conf->pool_size; i++)
  888. nsh->dev[i].page = osh->dev[i].page;
  889. for( ; i<newsize; i++)
  890. nsh->dev[i].page = NULL;
  891. kmem_cache_free(conf->slab_cache, osh);
  892. }
  893. kmem_cache_destroy(conf->slab_cache);
  894. /* Step 3.
  895. * At this point, we are holding all the stripes so the array
  896. * is completely stalled, so now is a good time to resize
  897. * conf->disks.
  898. */
  899. ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
  900. if (ndisks) {
  901. for (i=0; i<conf->raid_disks; i++)
  902. ndisks[i] = conf->disks[i];
  903. kfree(conf->disks);
  904. conf->disks = ndisks;
  905. } else
  906. err = -ENOMEM;
  907. /* Step 4, return new stripes to service */
  908. while(!list_empty(&newstripes)) {
  909. nsh = list_entry(newstripes.next, struct stripe_head, lru);
  910. list_del_init(&nsh->lru);
  911. for (i=conf->raid_disks; i < newsize; i++)
  912. if (nsh->dev[i].page == NULL) {
  913. struct page *p = alloc_page(GFP_NOIO);
  914. nsh->dev[i].page = p;
  915. if (!p)
  916. err = -ENOMEM;
  917. }
  918. release_stripe(nsh);
  919. }
  920. /* critical section passed, GFP_NOIO no longer needed */
  921. conf->slab_cache = sc;
  922. conf->active_name = 1-conf->active_name;
  923. conf->pool_size = newsize;
  924. return err;
  925. }
  926. #endif
  927. static int drop_one_stripe(raid5_conf_t *conf)
  928. {
  929. struct stripe_head *sh;
  930. spin_lock_irq(&conf->device_lock);
  931. sh = get_free_stripe(conf);
  932. spin_unlock_irq(&conf->device_lock);
  933. if (!sh)
  934. return 0;
  935. BUG_ON(atomic_read(&sh->count));
  936. shrink_buffers(sh, conf->pool_size);
  937. kmem_cache_free(conf->slab_cache, sh);
  938. atomic_dec(&conf->active_stripes);
  939. return 1;
  940. }
  941. static void shrink_stripes(raid5_conf_t *conf)
  942. {
  943. while (drop_one_stripe(conf))
  944. ;
  945. if (conf->slab_cache)
  946. kmem_cache_destroy(conf->slab_cache);
  947. conf->slab_cache = NULL;
  948. }
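/*
 * Completion handler for a stripe read. On success any previous read-error
 * state on the device is cleared; on failure the block is either scheduled
 * for a retry/rewrite (R5_ReadError) or, if errors persist, the device is
 * failed via md_error().
 */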
  949. static void raid5_end_read_request(struct bio * bi, int error)
  950. {
  951. struct stripe_head *sh = bi->bi_private;
  952. raid5_conf_t *conf = sh->raid_conf;
  953. int disks = sh->disks, i;
  954. int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
  955. char b[BDEVNAME_SIZE];
  956. mdk_rdev_t *rdev;
  957. for (i=0 ; i<disks; i++)
  958. if (bi == &sh->dev[i].req)
  959. break;
  960. pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
  961. (unsigned long long)sh->sector, i, atomic_read(&sh->count),
  962. uptodate);
  963. if (i == disks) {
  964. BUG();
  965. return;
  966. }
  967. if (uptodate) {
  968. set_bit(R5_UPTODATE, &sh->dev[i].flags);
  969. if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
  970. rdev = conf->disks[i].rdev;
  971. printk_rl(KERN_INFO "raid5:%s: read error corrected"
  972. " (%lu sectors at %llu on %s)\n",
  973. mdname(conf->mddev), STRIPE_SECTORS,
  974. (unsigned long long)(sh->sector
  975. + rdev->data_offset),
  976. bdevname(rdev->bdev, b));
  977. clear_bit(R5_ReadError, &sh->dev[i].flags);
  978. clear_bit(R5_ReWrite, &sh->dev[i].flags);
  979. }
  980. if (atomic_read(&conf->disks[i].rdev->read_errors))
  981. atomic_set(&conf->disks[i].rdev->read_errors, 0);
  982. } else {
  983. const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
  984. int retry = 0;
  985. rdev = conf->disks[i].rdev;
  986. clear_bit(R5_UPTODATE, &sh->dev[i].flags);
  987. atomic_inc(&rdev->read_errors);
  988. if (conf->mddev->degraded)
  989. printk_rl(KERN_WARNING
  990. "raid5:%s: read error not correctable "
  991. "(sector %llu on %s).\n",
  992. mdname(conf->mddev),
  993. (unsigned long long)(sh->sector
  994. + rdev->data_offset),
  995. bdn);
  996. else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
  997. /* Oh, no!!! */
  998. printk_rl(KERN_WARNING
  999. "raid5:%s: read error NOT corrected!! "
  1000. "(sector %llu on %s).\n",
  1001. mdname(conf->mddev),
  1002. (unsigned long long)(sh->sector
  1003. + rdev->data_offset),
  1004. bdn);
  1005. else if (atomic_read(&rdev->read_errors)
  1006. > conf->max_nr_stripes)
  1007. printk(KERN_WARNING
  1008. "raid5:%s: Too many read errors, failing device %s.\n",
  1009. mdname(conf->mddev), bdn);
  1010. else
  1011. retry = 1;
  1012. if (retry)
  1013. set_bit(R5_ReadError, &sh->dev[i].flags);
  1014. else {
  1015. clear_bit(R5_ReadError, &sh->dev[i].flags);
  1016. clear_bit(R5_ReWrite, &sh->dev[i].flags);
  1017. md_error(conf->mddev, rdev);
  1018. }
  1019. }
  1020. rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
  1021. clear_bit(R5_LOCKED, &sh->dev[i].flags);
  1022. set_bit(STRIPE_HANDLE, &sh->state);
  1023. release_stripe(sh);
  1024. }
  1025. static void raid5_end_write_request(struct bio *bi, int error)
  1026. {
  1027. struct stripe_head *sh = bi->bi_private;
  1028. raid5_conf_t *conf = sh->raid_conf;
  1029. int disks = sh->disks, i;
  1030. int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
  1031. for (i=0 ; i<disks; i++)
  1032. if (bi == &sh->dev[i].req)
  1033. break;
  1034. pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
  1035. (unsigned long long)sh->sector, i, atomic_read(&sh->count),
  1036. uptodate);
  1037. if (i == disks) {
  1038. BUG();
  1039. return;
  1040. }
  1041. if (!uptodate)
  1042. md_error(conf->mddev, conf->disks[i].rdev);
  1043. rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
  1044. clear_bit(R5_LOCKED, &sh->dev[i].flags);
  1045. set_bit(STRIPE_HANDLE, &sh->state);
  1046. release_stripe(sh);
  1047. }
  1048. static sector_t compute_blocknr(struct stripe_head *sh, int i);
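/*
 * raid5_build_block - initialise the embedded bio and bio_vec of one r5dev
 * so they can carry reads and writes of this device's page, and record the
 * array sector held by this device via compute_blocknr().
 */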
  1049. static void raid5_build_block(struct stripe_head *sh, int i)
  1050. {
  1051. struct r5dev *dev = &sh->dev[i];
  1052. bio_init(&dev->req);
  1053. dev->req.bi_io_vec = &dev->vec;
  1054. dev->req.bi_vcnt++;
  1055. dev->req.bi_max_vecs++;
  1056. dev->vec.bv_page = dev->page;
  1057. dev->vec.bv_len = STRIPE_SIZE;
  1058. dev->vec.bv_offset = 0;
  1059. dev->req.bi_sector = sh->sector;
  1060. dev->req.bi_private = sh;
  1061. dev->flags = 0;
  1062. dev->sector = compute_blocknr(sh, i);
  1063. }
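/*
 * error - mark a member device Faulty.  If it was still In_sync the
 * array's degraded count is raised under device_lock and any recovery in
 * progress is told to abort.
 */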
  1064. static void error(mddev_t *mddev, mdk_rdev_t *rdev)
  1065. {
  1066. char b[BDEVNAME_SIZE];
  1067. raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
  1068. pr_debug("raid5: error called\n");
  1069. if (!test_bit(Faulty, &rdev->flags)) {
  1070. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  1071. if (test_and_clear_bit(In_sync, &rdev->flags)) {
  1072. unsigned long flags;
  1073. spin_lock_irqsave(&conf->device_lock, flags);
  1074. mddev->degraded++;
  1075. spin_unlock_irqrestore(&conf->device_lock, flags);
  1076. /*
  1077. * if recovery was running, make sure it aborts.
  1078. */
  1079. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  1080. }
  1081. set_bit(Faulty, &rdev->flags);
  1082. printk(KERN_ALERT
  1083. "raid5: Disk failure on %s, disabling device.\n"
  1084. "raid5: Operation continuing on %d devices.\n",
  1085. bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
  1086. }
  1087. }
  1088. /*
  1089. * Input: a 'big' sector number,
  1090. * Output: index of the data and parity disk, and the sector # in them.
  1091. */
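/*
 * A worked example (illustrative geometry, not taken from this driver's
 * defaults): with 5 raid_disks, level 5, ALGORITHM_LEFT_SYMMETRIC and a
 * 64KiB chunk (128 sectors), r_sector 1000 gives chunk_offset 104 and
 * chunk_number 7, so stripe = 7/4 = 1 and the raw dd_idx is 7%4 = 3.
 * pd_idx is then 4 - (1 % 5) = 3, dd_idx becomes (3 + 1 + 3) % 5 = 2,
 * and the returned new_sector is 1*128 + 104 = 232.
 */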
  1092. static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
  1093. int previous, int *dd_idx,
  1094. struct stripe_head *sh)
  1095. {
  1096. long stripe;
  1097. unsigned long chunk_number;
  1098. unsigned int chunk_offset;
  1099. int pd_idx, qd_idx;
  1100. int ddf_layout = 0;
  1101. sector_t new_sector;
  1102. int sectors_per_chunk = conf->chunk_size >> 9;
  1103. int raid_disks = previous ? conf->previous_raid_disks
  1104. : conf->raid_disks;
  1105. int data_disks = raid_disks - conf->max_degraded;
  1106. /* First compute the information on this sector */
  1107. /*
  1108. * Compute the chunk number and the sector offset inside the chunk
  1109. */
  1110. chunk_offset = sector_div(r_sector, sectors_per_chunk);
  1111. chunk_number = r_sector;
  1112. BUG_ON(r_sector != chunk_number);
  1113. /*
  1114. * Compute the stripe number
  1115. */
  1116. stripe = chunk_number / data_disks;
  1117. /*
  1118. * Compute the data disk and parity disk indexes inside the stripe
  1119. */
  1120. *dd_idx = chunk_number % data_disks;
  1121. /*
  1122. * Select the parity disk based on the user selected algorithm.
  1123. */
  1124. pd_idx = qd_idx = ~0;
  1125. switch(conf->level) {
  1126. case 4:
  1127. pd_idx = data_disks;
  1128. break;
  1129. case 5:
  1130. switch (conf->algorithm) {
  1131. case ALGORITHM_LEFT_ASYMMETRIC:
  1132. pd_idx = data_disks - stripe % raid_disks;
  1133. if (*dd_idx >= pd_idx)
  1134. (*dd_idx)++;
  1135. break;
  1136. case ALGORITHM_RIGHT_ASYMMETRIC:
  1137. pd_idx = stripe % raid_disks;
  1138. if (*dd_idx >= pd_idx)
  1139. (*dd_idx)++;
  1140. break;
  1141. case ALGORITHM_LEFT_SYMMETRIC:
  1142. pd_idx = data_disks - stripe % raid_disks;
  1143. *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
  1144. break;
  1145. case ALGORITHM_RIGHT_SYMMETRIC:
  1146. pd_idx = stripe % raid_disks;
  1147. *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
  1148. break;
  1149. case ALGORITHM_PARITY_0:
  1150. pd_idx = 0;
  1151. (*dd_idx)++;
  1152. break;
  1153. case ALGORITHM_PARITY_N:
  1154. pd_idx = data_disks;
  1155. break;
  1156. default:
  1157. printk(KERN_ERR "raid5: unsupported algorithm %d\n",
  1158. conf->algorithm);
  1159. BUG();
  1160. }
  1161. break;
  1162. case 6:
  1163. switch (conf->algorithm) {
  1164. case ALGORITHM_LEFT_ASYMMETRIC:
  1165. pd_idx = raid_disks - 1 - (stripe % raid_disks);
  1166. qd_idx = pd_idx + 1;
  1167. if (pd_idx == raid_disks-1) {
  1168. (*dd_idx)++; /* Q D D D P */
  1169. qd_idx = 0;
  1170. } else if (*dd_idx >= pd_idx)
  1171. (*dd_idx) += 2; /* D D P Q D */
  1172. break;
  1173. case ALGORITHM_RIGHT_ASYMMETRIC:
  1174. pd_idx = stripe % raid_disks;
  1175. qd_idx = pd_idx + 1;
  1176. if (pd_idx == raid_disks-1) {
  1177. (*dd_idx)++; /* Q D D D P */
  1178. qd_idx = 0;
  1179. } else if (*dd_idx >= pd_idx)
  1180. (*dd_idx) += 2; /* D D P Q D */
  1181. break;
  1182. case ALGORITHM_LEFT_SYMMETRIC:
  1183. pd_idx = raid_disks - 1 - (stripe % raid_disks);
  1184. qd_idx = (pd_idx + 1) % raid_disks;
  1185. *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
  1186. break;
  1187. case ALGORITHM_RIGHT_SYMMETRIC:
  1188. pd_idx = stripe % raid_disks;
  1189. qd_idx = (pd_idx + 1) % raid_disks;
  1190. *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
  1191. break;
  1192. case ALGORITHM_PARITY_0:
  1193. pd_idx = 0;
  1194. qd_idx = 1;
  1195. (*dd_idx) += 2;
  1196. break;
  1197. case ALGORITHM_PARITY_N:
  1198. pd_idx = data_disks;
  1199. qd_idx = data_disks + 1;
  1200. break;
  1201. case ALGORITHM_ROTATING_ZERO_RESTART:
1202. /* Exactly the same as RIGHT_ASYMMETRIC, but the order
1203. * of blocks for computing Q is different.
1204. */
  1205. pd_idx = stripe % raid_disks;
  1206. qd_idx = pd_idx + 1;
  1207. if (pd_idx == raid_disks-1) {
  1208. (*dd_idx)++; /* Q D D D P */
  1209. qd_idx = 0;
  1210. } else if (*dd_idx >= pd_idx)
  1211. (*dd_idx) += 2; /* D D P Q D */
  1212. ddf_layout = 1;
  1213. break;
  1214. case ALGORITHM_ROTATING_N_RESTART:
1215. /* Same as left_asymmetric, but the first stripe is
1216. * D D D P Q rather than
1217. * Q D D D P
1218. */
  1219. pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
  1220. qd_idx = pd_idx + 1;
  1221. if (pd_idx == raid_disks-1) {
  1222. (*dd_idx)++; /* Q D D D P */
  1223. qd_idx = 0;
  1224. } else if (*dd_idx >= pd_idx)
  1225. (*dd_idx) += 2; /* D D P Q D */
  1226. ddf_layout = 1;
  1227. break;
  1228. case ALGORITHM_ROTATING_N_CONTINUE:
  1229. /* Same as left_symmetric but Q is before P */
  1230. pd_idx = raid_disks - 1 - (stripe % raid_disks);
  1231. qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
  1232. *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
  1233. ddf_layout = 1;
  1234. break;
  1235. case ALGORITHM_LEFT_ASYMMETRIC_6:
  1236. /* RAID5 left_asymmetric, with Q on last device */
  1237. pd_idx = data_disks - stripe % (raid_disks-1);
  1238. if (*dd_idx >= pd_idx)
  1239. (*dd_idx)++;
  1240. qd_idx = raid_disks - 1;
  1241. break;
  1242. case ALGORITHM_RIGHT_ASYMMETRIC_6:
  1243. pd_idx = stripe % (raid_disks-1);
  1244. if (*dd_idx >= pd_idx)
  1245. (*dd_idx)++;
  1246. qd_idx = raid_disks - 1;
  1247. break;
  1248. case ALGORITHM_LEFT_SYMMETRIC_6:
  1249. pd_idx = data_disks - stripe % (raid_disks-1);
  1250. *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
  1251. qd_idx = raid_disks - 1;
  1252. break;
  1253. case ALGORITHM_RIGHT_SYMMETRIC_6:
  1254. pd_idx = stripe % (raid_disks-1);
  1255. *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
  1256. qd_idx = raid_disks - 1;
  1257. break;
  1258. case ALGORITHM_PARITY_0_6:
  1259. pd_idx = 0;
  1260. (*dd_idx)++;
  1261. qd_idx = raid_disks - 1;
  1262. break;
  1263. default:
  1264. printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
  1265. conf->algorithm);
  1266. BUG();
  1267. }
  1268. break;
  1269. }
  1270. if (sh) {
  1271. sh->pd_idx = pd_idx;
  1272. sh->qd_idx = qd_idx;
  1273. sh->ddf_layout = ddf_layout;
  1274. }
  1275. /*
  1276. * Finally, compute the new sector number
  1277. */
  1278. new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
  1279. return new_sector;
  1280. }
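/*
 * compute_blocknr - the inverse of raid5_compute_sector: given a stripe and
 * a device index, work out which array sector lives there.  The result is
 * cross-checked by mapping it forward again; on a mismatch an error is
 * logged and 0 is returned.  With the illustrative geometry above,
 * compute_blocknr() on device 2 of the stripe at sector 232 yields 1000.
 */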
  1281. static sector_t compute_blocknr(struct stripe_head *sh, int i)
  1282. {
  1283. raid5_conf_t *conf = sh->raid_conf;
  1284. int raid_disks = sh->disks;
  1285. int data_disks = raid_disks - conf->max_degraded;
  1286. sector_t new_sector = sh->sector, check;
  1287. int sectors_per_chunk = conf->chunk_size >> 9;
  1288. sector_t stripe;
  1289. int chunk_offset;
  1290. int chunk_number, dummy1, dd_idx = i;
  1291. sector_t r_sector;
  1292. struct stripe_head sh2;
  1293. chunk_offset = sector_div(new_sector, sectors_per_chunk);
  1294. stripe = new_sector;
  1295. BUG_ON(new_sector != stripe);
  1296. if (i == sh->pd_idx)
  1297. return 0;
  1298. switch(conf->level) {
  1299. case 4: break;
  1300. case 5:
  1301. switch (conf->algorithm) {
  1302. case ALGORITHM_LEFT_ASYMMETRIC:
  1303. case ALGORITHM_RIGHT_ASYMMETRIC:
  1304. if (i > sh->pd_idx)
  1305. i--;
  1306. break;
  1307. case ALGORITHM_LEFT_SYMMETRIC:
  1308. case ALGORITHM_RIGHT_SYMMETRIC:
  1309. if (i < sh->pd_idx)
  1310. i += raid_disks;
  1311. i -= (sh->pd_idx + 1);
  1312. break;
  1313. case ALGORITHM_PARITY_0:
  1314. i -= 1;
  1315. break;
  1316. case ALGORITHM_PARITY_N:
  1317. break;
  1318. default:
  1319. printk(KERN_ERR "raid5: unsupported algorithm %d\n",
  1320. conf->algorithm);
  1321. BUG();
  1322. }
  1323. break;
  1324. case 6:
  1325. if (i == sh->qd_idx)
  1326. return 0; /* It is the Q disk */
  1327. switch (conf->algorithm) {
  1328. case ALGORITHM_LEFT_ASYMMETRIC:
  1329. case ALGORITHM_RIGHT_ASYMMETRIC:
  1330. case ALGORITHM_ROTATING_ZERO_RESTART:
  1331. case ALGORITHM_ROTATING_N_RESTART:
  1332. if (sh->pd_idx == raid_disks-1)
  1333. i--; /* Q D D D P */
  1334. else if (i > sh->pd_idx)
  1335. i -= 2; /* D D P Q D */
  1336. break;
  1337. case ALGORITHM_LEFT_SYMMETRIC:
  1338. case ALGORITHM_RIGHT_SYMMETRIC:
  1339. if (sh->pd_idx == raid_disks-1)
  1340. i--; /* Q D D D P */
  1341. else {
  1342. /* D D P Q D */
  1343. if (i < sh->pd_idx)
  1344. i += raid_disks;
  1345. i -= (sh->pd_idx + 2);
  1346. }
  1347. break;
  1348. case ALGORITHM_PARITY_0:
  1349. i -= 2;
  1350. break;
  1351. case ALGORITHM_PARITY_N:
  1352. break;
  1353. case ALGORITHM_ROTATING_N_CONTINUE:
  1354. if (sh->pd_idx == 0)
  1355. i--; /* P D D D Q */
  1356. else if (i > sh->pd_idx)
  1357. i -= 2; /* D D Q P D */
  1358. break;
  1359. case ALGORITHM_LEFT_ASYMMETRIC_6:
  1360. case ALGORITHM_RIGHT_ASYMMETRIC_6:
  1361. if (i > sh->pd_idx)
  1362. i--;
  1363. break;
  1364. case ALGORITHM_LEFT_SYMMETRIC_6:
  1365. case ALGORITHM_RIGHT_SYMMETRIC_6:
  1366. if (i < sh->pd_idx)
  1367. i += data_disks + 1;
  1368. i -= (sh->pd_idx + 1);
  1369. break;
  1370. case ALGORITHM_PARITY_0_6:
  1371. i -= 1;
  1372. break;
  1373. default:
  1374. printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
  1375. conf->algorithm);
  1376. BUG();
  1377. }
  1378. break;
  1379. }
  1380. chunk_number = stripe * data_disks + i;
  1381. r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
  1382. check = raid5_compute_sector(conf, r_sector,
  1383. (raid_disks != conf->raid_disks),
  1384. &dummy1, &sh2);
  1385. if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
  1386. || sh2.qd_idx != sh->qd_idx) {
  1387. printk(KERN_ERR "compute_blocknr: map not correct\n");
  1388. return 0;
  1389. }
  1390. return r_sector;
  1391. }
  1392. /*
  1393. * Copy data between a page in the stripe cache, and one or more bion
  1394. * The page could align with the middle of the bio, or there could be
  1395. * several bion, each with several bio_vecs, which cover part of the page
  1396. * Multiple bion are linked together on bi_next. There may be extras
  1397. * at the end of this list. We ignore them.
  1398. */
  1399. static void copy_data(int frombio, struct bio *bio,
  1400. struct page *page,
  1401. sector_t sector)
  1402. {
  1403. char *pa = page_address(page);
  1404. struct bio_vec *bvl;
  1405. int i;
  1406. int page_offset;
  1407. if (bio->bi_sector >= sector)
  1408. page_offset = (signed)(bio->bi_sector - sector) * 512;
  1409. else
  1410. page_offset = (signed)(sector - bio->bi_sector) * -512;
  1411. bio_for_each_segment(bvl, bio, i) {
  1412. int len = bio_iovec_idx(bio,i)->bv_len;
  1413. int clen;
  1414. int b_offset = 0;
  1415. if (page_offset < 0) {
  1416. b_offset = -page_offset;
  1417. page_offset += b_offset;
  1418. len -= b_offset;
  1419. }
  1420. if (len > 0 && page_offset + len > STRIPE_SIZE)
  1421. clen = STRIPE_SIZE - page_offset;
  1422. else clen = len;
  1423. if (clen > 0) {
  1424. char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
  1425. if (frombio)
  1426. memcpy(pa+page_offset, ba+b_offset, clen);
  1427. else
  1428. memcpy(ba+b_offset, pa+page_offset, clen);
  1429. __bio_kunmap_atomic(ba, KM_USER0);
  1430. }
  1431. if (clen < len) /* hit end of page */
  1432. break;
  1433. page_offset += len;
  1434. }
  1435. }
  1436. #define check_xor() do { \
  1437. if (count == MAX_XOR_BLOCKS) { \
  1438. xor_blocks(count, STRIPE_SIZE, dest, ptr);\
  1439. count = 0; \
  1440. } \
  1441. } while(0)
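/*
 * compute_parity6 - (re)generate the P and Q blocks of a RAID-6 stripe with
 * raid6_call.gen_syndrome().  For RECONSTRUCT_WRITE the pending write bios
 * are first drained into the stripe cache pages; the order of the source
 * blocks depends on the layout (see raid6_idx_to_slot()).
 */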
  1442. static void compute_parity6(struct stripe_head *sh, int method)
  1443. {
  1444. raid5_conf_t *conf = sh->raid_conf;
  1445. int i, pd_idx, qd_idx, d0_idx, disks = sh->disks, count;
  1446. int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
  1447. struct bio *chosen;
  1448. /**** FIX THIS: This could be very bad if disks is close to 256 ****/
  1449. void *ptrs[syndrome_disks+2];
  1450. pd_idx = sh->pd_idx;
  1451. qd_idx = sh->qd_idx;
  1452. d0_idx = raid6_d0(sh);
  1453. pr_debug("compute_parity, stripe %llu, method %d\n",
  1454. (unsigned long long)sh->sector, method);
  1455. switch(method) {
  1456. case READ_MODIFY_WRITE:
  1457. BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */
  1458. case RECONSTRUCT_WRITE:
  1459. for (i= disks; i-- ;)
  1460. if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
  1461. chosen = sh->dev[i].towrite;
  1462. sh->dev[i].towrite = NULL;
  1463. if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
  1464. wake_up(&conf->wait_for_overlap);
  1465. BUG_ON(sh->dev[i].written);
  1466. sh->dev[i].written = chosen;
  1467. }
  1468. break;
  1469. case CHECK_PARITY:
  1470. BUG(); /* Not implemented yet */
  1471. }
  1472. for (i = disks; i--;)
  1473. if (sh->dev[i].written) {
  1474. sector_t sector = sh->dev[i].sector;
  1475. struct bio *wbi = sh->dev[i].written;
  1476. while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
  1477. copy_data(1, wbi, sh->dev[i].page, sector);
  1478. wbi = r5_next_bio(wbi, sector);
  1479. }
  1480. set_bit(R5_LOCKED, &sh->dev[i].flags);
  1481. set_bit(R5_UPTODATE, &sh->dev[i].flags);
  1482. }
1483. /* Note that unlike RAID-5, the ordering of the disks matters greatly. */
  1484. for (i = 0; i < disks; i++)
  1485. ptrs[i] = (void *)raid6_empty_zero_page;
  1486. count = 0;
  1487. i = d0_idx;
  1488. do {
  1489. int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
  1490. ptrs[slot] = page_address(sh->dev[i].page);
  1491. if (slot < syndrome_disks &&
  1492. !test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
  1493. printk(KERN_ERR "block %d/%d not uptodate "
  1494. "on parity calc\n", i, count);
  1495. BUG();
  1496. }
  1497. i = raid6_next_disk(i, disks);
  1498. } while (i != d0_idx);
  1499. BUG_ON(count != syndrome_disks);
  1500. raid6_call.gen_syndrome(syndrome_disks+2, STRIPE_SIZE, ptrs);
  1501. switch(method) {
  1502. case RECONSTRUCT_WRITE:
  1503. set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
  1504. set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
  1505. set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
  1506. set_bit(R5_LOCKED, &sh->dev[qd_idx].flags);
  1507. break;
  1508. case UPDATE_PARITY:
  1509. set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
  1510. set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
  1511. break;
  1512. }
  1513. }
  1514. /* Compute one missing block */
  1515. static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
  1516. {
  1517. int i, count, disks = sh->disks;
  1518. void *ptr[MAX_XOR_BLOCKS], *dest, *p;
  1519. int qd_idx = sh->qd_idx;
  1520. pr_debug("compute_block_1, stripe %llu, idx %d\n",
  1521. (unsigned long long)sh->sector, dd_idx);
  1522. if ( dd_idx == qd_idx ) {
  1523. /* We're actually computing the Q drive */
  1524. compute_parity6(sh, UPDATE_PARITY);
  1525. } else {
  1526. dest = page_address(sh->dev[dd_idx].page);
  1527. if (!nozero) memset(dest, 0, STRIPE_SIZE);
  1528. count = 0;
  1529. for (i = disks ; i--; ) {
  1530. if (i == dd_idx || i == qd_idx)
  1531. continue;
  1532. p = page_address(sh->dev[i].page);
  1533. if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
  1534. ptr[count++] = p;
  1535. else
  1536. printk("compute_block() %d, stripe %llu, %d"
  1537. " not present\n", dd_idx,
  1538. (unsigned long long)sh->sector, i);
  1539. check_xor();
  1540. }
  1541. if (count)
  1542. xor_blocks(count, STRIPE_SIZE, dest, ptr);
  1543. if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
  1544. else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
  1545. }
  1546. }
  1547. /* Compute two missing blocks */
  1548. static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
  1549. {
  1550. int i, count, disks = sh->disks;
  1551. int syndrome_disks = sh->ddf_layout ? disks : disks-2;
  1552. int d0_idx = raid6_d0(sh);
  1553. int faila = -1, failb = -1;
  1554. /**** FIX THIS: This could be very bad if disks is close to 256 ****/
  1555. void *ptrs[syndrome_disks+2];
  1556. for (i = 0; i < disks ; i++)
  1557. ptrs[i] = (void *)raid6_empty_zero_page;
  1558. count = 0;
  1559. i = d0_idx;
  1560. do {
  1561. int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
  1562. ptrs[slot] = page_address(sh->dev[i].page);
  1563. if (i == dd_idx1)
  1564. faila = slot;
  1565. if (i == dd_idx2)
  1566. failb = slot;
  1567. i = raid6_next_disk(i, disks);
  1568. } while (i != d0_idx);
  1569. BUG_ON(count != syndrome_disks);
  1570. BUG_ON(faila == failb);
  1571. if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
  1572. pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
  1573. (unsigned long long)sh->sector, dd_idx1, dd_idx2,
  1574. faila, failb);
  1575. if (failb == syndrome_disks+1) {
  1576. /* Q disk is one of the missing disks */
  1577. if (faila == syndrome_disks) {
  1578. /* Missing P+Q, just recompute */
  1579. compute_parity6(sh, UPDATE_PARITY);
  1580. return;
  1581. } else {
  1582. /* We're missing D+Q; recompute D from P */
  1583. compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ?
  1584. dd_idx2 : dd_idx1),
  1585. 0);
  1586. compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
  1587. return;
  1588. }
  1589. }
  1590. /* We're missing D+P or D+D; */
  1591. if (failb == syndrome_disks) {
  1592. /* We're missing D+P. */
  1593. raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE, faila, ptrs);
  1594. } else {
  1595. /* We're missing D+D. */
  1596. raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE, faila, failb,
  1597. ptrs);
  1598. }
  1599. /* Both the above update both missing blocks */
  1600. set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
  1601. set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
  1602. }
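/*
 * schedule_reconstruction5 - set up the stripe flags and s->ops_request
 * bits for an asynchronous parity update.  With rcw set, new data is
 * drained and parity is recomputed from all blocks; otherwise a
 * read-modify-write sequence (prexor + drain + postxor) is scheduled.  The
 * parity block is left locked until the asynchronous operations finish.
 */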
  1603. static void
  1604. schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
  1605. int rcw, int expand)
  1606. {
  1607. int i, pd_idx = sh->pd_idx, disks = sh->disks;
  1608. if (rcw) {
  1609. /* if we are not expanding this is a proper write request, and
  1610. * there will be bios with new data to be drained into the
  1611. * stripe cache
  1612. */
  1613. if (!expand) {
  1614. sh->reconstruct_state = reconstruct_state_drain_run;
  1615. set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
  1616. } else
  1617. sh->reconstruct_state = reconstruct_state_run;
  1618. set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
  1619. for (i = disks; i--; ) {
  1620. struct r5dev *dev = &sh->dev[i];
  1621. if (dev->towrite) {
  1622. set_bit(R5_LOCKED, &dev->flags);
  1623. set_bit(R5_Wantdrain, &dev->flags);
  1624. if (!expand)
  1625. clear_bit(R5_UPTODATE, &dev->flags);
  1626. s->locked++;
  1627. }
  1628. }
  1629. if (s->locked + 1 == disks)
  1630. if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
  1631. atomic_inc(&sh->raid_conf->pending_full_writes);
  1632. } else {
  1633. BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
  1634. test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
  1635. sh->reconstruct_state = reconstruct_state_prexor_drain_run;
  1636. set_bit(STRIPE_OP_PREXOR, &s->ops_request);
  1637. set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
  1638. set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
  1639. for (i = disks; i--; ) {
  1640. struct r5dev *dev = &sh->dev[i];
  1641. if (i == pd_idx)
  1642. continue;
  1643. if (dev->towrite &&
  1644. (test_bit(R5_UPTODATE, &dev->flags) ||
  1645. test_bit(R5_Wantcompute, &dev->flags))) {
  1646. set_bit(R5_Wantdrain, &dev->flags);
  1647. set_bit(R5_LOCKED, &dev->flags);
  1648. clear_bit(R5_UPTODATE, &dev->flags);
  1649. s->locked++;
  1650. }
  1651. }
  1652. }
  1653. /* keep the parity disk locked while asynchronous operations
  1654. * are in flight
  1655. */
  1656. set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
  1657. clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
  1658. s->locked++;
  1659. pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
  1660. __func__, (unsigned long long)sh->sector,
  1661. s->locked, s->ops_request);
  1662. }
  1663. /*
  1664. * Each stripe/dev can have one or more bion attached.
  1665. * toread/towrite point to the first in a chain.
  1666. * The bi_next chain must be in order.
  1667. */
  1668. static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
  1669. {
  1670. struct bio **bip;
  1671. raid5_conf_t *conf = sh->raid_conf;
  1672. int firstwrite=0;
  1673. pr_debug("adding bh b#%llu to stripe s#%llu\n",
  1674. (unsigned long long)bi->bi_sector,
  1675. (unsigned long long)sh->sector);
  1676. spin_lock(&sh->lock);
  1677. spin_lock_irq(&conf->device_lock);
  1678. if (forwrite) {
  1679. bip = &sh->dev[dd_idx].towrite;
  1680. if (*bip == NULL && sh->dev[dd_idx].written == NULL)
  1681. firstwrite = 1;
  1682. } else
  1683. bip = &sh->dev[dd_idx].toread;
  1684. while (*bip && (*bip)->bi_sector < bi->bi_sector) {
  1685. if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
  1686. goto overlap;
  1687. bip = & (*bip)->bi_next;
  1688. }
  1689. if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
  1690. goto overlap;
  1691. BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
  1692. if (*bip)
  1693. bi->bi_next = *bip;
  1694. *bip = bi;
  1695. bi->bi_phys_segments++;
  1696. spin_unlock_irq(&conf->device_lock);
  1697. spin_unlock(&sh->lock);
  1698. pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
  1699. (unsigned long long)bi->bi_sector,
  1700. (unsigned long long)sh->sector, dd_idx);
  1701. if (conf->mddev->bitmap && firstwrite) {
  1702. bitmap_startwrite(conf->mddev->bitmap, sh->sector,
  1703. STRIPE_SECTORS, 0);
  1704. sh->bm_seq = conf->seq_flush+1;
  1705. set_bit(STRIPE_BIT_DELAY, &sh->state);
  1706. }
  1707. if (forwrite) {
  1708. /* check if page is covered */
  1709. sector_t sector = sh->dev[dd_idx].sector;
  1710. for (bi=sh->dev[dd_idx].towrite;
  1711. sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
  1712. bi && bi->bi_sector <= sector;
  1713. bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
  1714. if (bi->bi_sector + (bi->bi_size>>9) >= sector)
  1715. sector = bi->bi_sector + (bi->bi_size>>9);
  1716. }
  1717. if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
  1718. set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
  1719. }
  1720. return 1;
  1721. overlap:
  1722. set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
  1723. spin_unlock_irq(&conf->device_lock);
  1724. spin_unlock(&sh->lock);
  1725. return 0;
  1726. }
  1727. static void end_reshape(raid5_conf_t *conf);
  1728. static int page_is_zero(struct page *p)
  1729. {
  1730. char *a = page_address(p);
  1731. return ((*(u32*)a) == 0 &&
  1732. memcmp(a, a+4, STRIPE_SIZE-4)==0);
  1733. }
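/*
 * stripe_set_idx - given the per-device sector of a stripe, fill in
 * sh->pd_idx/qd_idx (and ddf_layout) by mapping the stripe's first data
 * block forward through raid5_compute_sector().
 */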
  1734. static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
  1735. struct stripe_head *sh)
  1736. {
  1737. int sectors_per_chunk = conf->chunk_size >> 9;
  1738. int dd_idx;
  1739. int chunk_offset = sector_div(stripe, sectors_per_chunk);
  1740. int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
  1741. raid5_compute_sector(conf,
  1742. stripe * (disks - conf->max_degraded)
  1743. *sectors_per_chunk + chunk_offset,
  1744. previous,
  1745. &dd_idx, sh);
  1746. }
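/*
 * handle_failed_stripe - too many devices have failed for this stripe to be
 * recoverable, so error out the pending and 'written' writes, fail reads on
 * devices whose data cannot reach the cache, and hand the completed bios
 * back through *return_bi.
 */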
  1747. static void
  1748. handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
  1749. struct stripe_head_state *s, int disks,
  1750. struct bio **return_bi)
  1751. {
  1752. int i;
  1753. for (i = disks; i--; ) {
  1754. struct bio *bi;
  1755. int bitmap_end = 0;
  1756. if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
  1757. mdk_rdev_t *rdev;
  1758. rcu_read_lock();
  1759. rdev = rcu_dereference(conf->disks[i].rdev);
  1760. if (rdev && test_bit(In_sync, &rdev->flags))
  1761. /* multiple read failures in one stripe */
  1762. md_error(conf->mddev, rdev);
  1763. rcu_read_unlock();
  1764. }
  1765. spin_lock_irq(&conf->device_lock);
  1766. /* fail all writes first */
  1767. bi = sh->dev[i].towrite;
  1768. sh->dev[i].towrite = NULL;
  1769. if (bi) {
  1770. s->to_write--;
  1771. bitmap_end = 1;
  1772. }
  1773. if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
  1774. wake_up(&conf->wait_for_overlap);
  1775. while (bi && bi->bi_sector <
  1776. sh->dev[i].sector + STRIPE_SECTORS) {
  1777. struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
  1778. clear_bit(BIO_UPTODATE, &bi->bi_flags);
  1779. if (!raid5_dec_bi_phys_segments(bi)) {
  1780. md_write_end(conf->mddev);
  1781. bi->bi_next = *return_bi;
  1782. *return_bi = bi;
  1783. }
  1784. bi = nextbi;
  1785. }
  1786. /* and fail all 'written' */
  1787. bi = sh->dev[i].written;
  1788. sh->dev[i].written = NULL;
  1789. if (bi) bitmap_end = 1;
  1790. while (bi && bi->bi_sector <
  1791. sh->dev[i].sector + STRIPE_SECTORS) {
  1792. struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
  1793. clear_bit(BIO_UPTODATE, &bi->bi_flags);
  1794. if (!raid5_dec_bi_phys_segments(bi)) {
  1795. md_write_end(conf->mddev);
  1796. bi->bi_next = *return_bi;
  1797. *return_bi = bi;
  1798. }
  1799. bi = bi2;
  1800. }
  1801. /* fail any reads if this device is non-operational and
  1802. * the data has not reached the cache yet.
  1803. */
  1804. if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
  1805. (!test_bit(R5_Insync, &sh->dev[i].flags) ||
  1806. test_bit(R5_ReadError, &sh->dev[i].flags))) {
  1807. bi = sh->dev[i].toread;
  1808. sh->dev[i].toread = NULL;
  1809. if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
  1810. wake_up(&conf->wait_for_overlap);
  1811. if (bi) s->to_read--;
  1812. while (bi && bi->bi_sector <
  1813. sh->dev[i].sector + STRIPE_SECTORS) {
  1814. struct bio *nextbi =
  1815. r5_next_bio(bi, sh->dev[i].sector);
  1816. clear_bit(BIO_UPTODATE, &bi->bi_flags);
  1817. if (!raid5_dec_bi_phys_segments(bi)) {
  1818. bi->bi_next = *return_bi;
  1819. *return_bi = bi;
  1820. }
  1821. bi = nextbi;
  1822. }
  1823. }
  1824. spin_unlock_irq(&conf->device_lock);
  1825. if (bitmap_end)
  1826. bitmap_endwrite(conf->mddev->bitmap, sh->sector,
  1827. STRIPE_SECTORS, 0, 0);
  1828. }
  1829. if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
  1830. if (atomic_dec_and_test(&conf->pending_full_writes))
  1831. md_wakeup_thread(conf->mddev->thread);
  1832. }
  1833. /* fetch_block5 - checks the given member device to see if its data needs
  1834. * to be read or computed to satisfy a request.
  1835. *
  1836. * Returns 1 when no more member devices need to be checked, otherwise returns
  1837. * 0 to tell the loop in handle_stripe_fill5 to continue
  1838. */
  1839. static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
  1840. int disk_idx, int disks)
  1841. {
  1842. struct r5dev *dev = &sh->dev[disk_idx];
  1843. struct r5dev *failed_dev = &sh->dev[s->failed_num];
  1844. /* is the data in this block needed, and can we get it? */
  1845. if (!test_bit(R5_LOCKED, &dev->flags) &&
  1846. !test_bit(R5_UPTODATE, &dev->flags) &&
  1847. (dev->toread ||
  1848. (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
  1849. s->syncing || s->expanding ||
  1850. (s->failed &&
  1851. (failed_dev->toread ||
  1852. (failed_dev->towrite &&
  1853. !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
  1854. /* We would like to get this block, possibly by computing it,
  1855. * otherwise read it if the backing disk is insync
  1856. */
  1857. if ((s->uptodate == disks - 1) &&
  1858. (s->failed && disk_idx == s->failed_num)) {
  1859. set_bit(STRIPE_COMPUTE_RUN, &sh->state);
  1860. set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
  1861. set_bit(R5_Wantcompute, &dev->flags);
  1862. sh->ops.target = disk_idx;
  1863. s->req_compute = 1;
  1864. /* Careful: from this point on 'uptodate' is in the eye
  1865. * of raid5_run_ops which services 'compute' operations
  1866. * before writes. R5_Wantcompute flags a block that will
  1867. * be R5_UPTODATE by the time it is needed for a
  1868. * subsequent operation.
  1869. */
  1870. s->uptodate++;
  1871. return 1; /* uptodate + compute == disks */
  1872. } else if (test_bit(R5_Insync, &dev->flags)) {
  1873. set_bit(R5_LOCKED, &dev->flags);
  1874. set_bit(R5_Wantread, &dev->flags);
  1875. s->locked++;
  1876. pr_debug("Reading block %d (sync=%d)\n", disk_idx,
  1877. s->syncing);
  1878. }
  1879. }
  1880. return 0;
  1881. }
  1882. /**
  1883. * handle_stripe_fill5 - read or compute data to satisfy pending requests.
  1884. */
  1885. static void handle_stripe_fill5(struct stripe_head *sh,
  1886. struct stripe_head_state *s, int disks)
  1887. {
  1888. int i;
  1889. /* look for blocks to read/compute, skip this if a compute
  1890. * is already in flight, or if the stripe contents are in the
  1891. * midst of changing due to a write
  1892. */
  1893. if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
  1894. !sh->reconstruct_state)
  1895. for (i = disks; i--; )
  1896. if (fetch_block5(sh, s, i, disks))
  1897. break;
  1898. set_bit(STRIPE_HANDLE, &sh->state);
  1899. }
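/*
 * handle_stripe_fill6 - RAID-6 counterpart of handle_stripe_fill5: read, or
 * synchronously compute (one block, or two when two devices have failed),
 * whatever is needed to satisfy the pending requests.
 */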
  1900. static void handle_stripe_fill6(struct stripe_head *sh,
  1901. struct stripe_head_state *s, struct r6_state *r6s,
  1902. int disks)
  1903. {
  1904. int i;
  1905. for (i = disks; i--; ) {
  1906. struct r5dev *dev = &sh->dev[i];
  1907. if (!test_bit(R5_LOCKED, &dev->flags) &&
  1908. !test_bit(R5_UPTODATE, &dev->flags) &&
  1909. (dev->toread || (dev->towrite &&
  1910. !test_bit(R5_OVERWRITE, &dev->flags)) ||
  1911. s->syncing || s->expanding ||
  1912. (s->failed >= 1 &&
  1913. (sh->dev[r6s->failed_num[0]].toread ||
  1914. s->to_write)) ||
  1915. (s->failed >= 2 &&
  1916. (sh->dev[r6s->failed_num[1]].toread ||
  1917. s->to_write)))) {
  1918. /* we would like to get this block, possibly
  1919. * by computing it, but we might not be able to
  1920. */
  1921. if ((s->uptodate == disks - 1) &&
  1922. (s->failed && (i == r6s->failed_num[0] ||
  1923. i == r6s->failed_num[1]))) {
  1924. pr_debug("Computing stripe %llu block %d\n",
  1925. (unsigned long long)sh->sector, i);
  1926. compute_block_1(sh, i, 0);
  1927. s->uptodate++;
  1928. } else if ( s->uptodate == disks-2 && s->failed >= 2 ) {
  1929. /* Computing 2-failure is *very* expensive; only
  1930. * do it if failed >= 2
  1931. */
  1932. int other;
  1933. for (other = disks; other--; ) {
  1934. if (other == i)
  1935. continue;
  1936. if (!test_bit(R5_UPTODATE,
  1937. &sh->dev[other].flags))
  1938. break;
  1939. }
  1940. BUG_ON(other < 0);
  1941. pr_debug("Computing stripe %llu blocks %d,%d\n",
  1942. (unsigned long long)sh->sector,
  1943. i, other);
  1944. compute_block_2(sh, i, other);
  1945. s->uptodate += 2;
  1946. } else if (test_bit(R5_Insync, &dev->flags)) {
  1947. set_bit(R5_LOCKED, &dev->flags);
  1948. set_bit(R5_Wantread, &dev->flags);
  1949. s->locked++;
  1950. pr_debug("Reading block %d (sync=%d)\n",
  1951. i, s->syncing);
  1952. }
  1953. }
  1954. }
  1955. set_bit(STRIPE_HANDLE, &sh->state);
  1956. }
  1957. /* handle_stripe_clean_event
  1958. * any written block on an uptodate or failed drive can be returned.
  1959. * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
  1960. * never LOCKED, so we don't need to test 'failed' directly.
  1961. */
  1962. static void handle_stripe_clean_event(raid5_conf_t *conf,
  1963. struct stripe_head *sh, int disks, struct bio **return_bi)
  1964. {
  1965. int i;
  1966. struct r5dev *dev;
  1967. for (i = disks; i--; )
  1968. if (sh->dev[i].written) {
  1969. dev = &sh->dev[i];
  1970. if (!test_bit(R5_LOCKED, &dev->flags) &&
  1971. test_bit(R5_UPTODATE, &dev->flags)) {
  1972. /* We can return any write requests */
  1973. struct bio *wbi, *wbi2;
  1974. int bitmap_end = 0;
  1975. pr_debug("Return write for disc %d\n", i);
  1976. spin_lock_irq(&conf->device_lock);
  1977. wbi = dev->written;
  1978. dev->written = NULL;
  1979. while (wbi && wbi->bi_sector <
  1980. dev->sector + STRIPE_SECTORS) {
  1981. wbi2 = r5_next_bio(wbi, dev->sector);
  1982. if (!raid5_dec_bi_phys_segments(wbi)) {
  1983. md_write_end(conf->mddev);
  1984. wbi->bi_next = *return_bi;
  1985. *return_bi = wbi;
  1986. }
  1987. wbi = wbi2;
  1988. }
  1989. if (dev->towrite == NULL)
  1990. bitmap_end = 1;
  1991. spin_unlock_irq(&conf->device_lock);
  1992. if (bitmap_end)
  1993. bitmap_endwrite(conf->mddev->bitmap,
  1994. sh->sector,
  1995. STRIPE_SECTORS,
  1996. !test_bit(STRIPE_DEGRADED, &sh->state),
  1997. 0);
  1998. }
  1999. }
  2000. if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
  2001. if (atomic_dec_and_test(&conf->pending_full_writes))
  2002. md_wakeup_thread(conf->mddev->thread);
  2003. }
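/*
 * handle_stripe_dirtying5 - choose between read-modify-write (rmw) and
 * reconstruct-write (rcw) for a RAID-5 write by counting the blocks each
 * strategy would need to read, schedule those reads, and kick off the
 * reconstruction once nothing is locked and enough data is cached.
 * Illustrative count (assuming a 5-drive stripe with nothing cached): a
 * partial write to one data block needs that block plus parity for rmw
 * (rmw = 2) but all four data blocks for rcw (rcw = 4), so rmw is chosen.
 */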
  2004. static void handle_stripe_dirtying5(raid5_conf_t *conf,
  2005. struct stripe_head *sh, struct stripe_head_state *s, int disks)
  2006. {
  2007. int rmw = 0, rcw = 0, i;
  2008. for (i = disks; i--; ) {
  2009. /* would I have to read this buffer for read_modify_write */
  2010. struct r5dev *dev = &sh->dev[i];
  2011. if ((dev->towrite || i == sh->pd_idx) &&
  2012. !test_bit(R5_LOCKED, &dev->flags) &&
  2013. !(test_bit(R5_UPTODATE, &dev->flags) ||
  2014. test_bit(R5_Wantcompute, &dev->flags))) {
  2015. if (test_bit(R5_Insync, &dev->flags))
  2016. rmw++;
  2017. else
  2018. rmw += 2*disks; /* cannot read it */
  2019. }
  2020. /* Would I have to read this buffer for reconstruct_write */
  2021. if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
  2022. !test_bit(R5_LOCKED, &dev->flags) &&
  2023. !(test_bit(R5_UPTODATE, &dev->flags) ||
  2024. test_bit(R5_Wantcompute, &dev->flags))) {
  2025. if (test_bit(R5_Insync, &dev->flags)) rcw++;
  2026. else
  2027. rcw += 2*disks;
  2028. }
  2029. }
  2030. pr_debug("for sector %llu, rmw=%d rcw=%d\n",
  2031. (unsigned long long)sh->sector, rmw, rcw);
  2032. set_bit(STRIPE_HANDLE, &sh->state);
  2033. if (rmw < rcw && rmw > 0)
  2034. /* prefer read-modify-write, but need to get some data */
  2035. for (i = disks; i--; ) {
  2036. struct r5dev *dev = &sh->dev[i];
  2037. if ((dev->towrite || i == sh->pd_idx) &&
  2038. !test_bit(R5_LOCKED, &dev->flags) &&
  2039. !(test_bit(R5_UPTODATE, &dev->flags) ||
  2040. test_bit(R5_Wantcompute, &dev->flags)) &&
  2041. test_bit(R5_Insync, &dev->flags)) {
2042. if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
  2044. pr_debug("Read_old block "
  2045. "%d for r-m-w\n", i);
  2046. set_bit(R5_LOCKED, &dev->flags);
  2047. set_bit(R5_Wantread, &dev->flags);
  2048. s->locked++;
  2049. } else {
  2050. set_bit(STRIPE_DELAYED, &sh->state);
  2051. set_bit(STRIPE_HANDLE, &sh->state);
  2052. }
  2053. }
  2054. }
  2055. if (rcw <= rmw && rcw > 0)
  2056. /* want reconstruct write, but need to get some data */
  2057. for (i = disks; i--; ) {
  2058. struct r5dev *dev = &sh->dev[i];
  2059. if (!test_bit(R5_OVERWRITE, &dev->flags) &&
  2060. i != sh->pd_idx &&
  2061. !test_bit(R5_LOCKED, &dev->flags) &&
  2062. !(test_bit(R5_UPTODATE, &dev->flags) ||
  2063. test_bit(R5_Wantcompute, &dev->flags)) &&
  2064. test_bit(R5_Insync, &dev->flags)) {
2065. if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
  2067. pr_debug("Read_old block "
  2068. "%d for Reconstruct\n", i);
  2069. set_bit(R5_LOCKED, &dev->flags);
  2070. set_bit(R5_Wantread, &dev->flags);
  2071. s->locked++;
  2072. } else {
  2073. set_bit(STRIPE_DELAYED, &sh->state);
  2074. set_bit(STRIPE_HANDLE, &sh->state);
  2075. }
  2076. }
  2077. }
  2078. /* now if nothing is locked, and if we have enough data,
  2079. * we can start a write request
  2080. */
  2081. /* since handle_stripe can be called at any time we need to handle the
  2082. * case where a compute block operation has been submitted and then a
  2083. * subsequent call wants to start a write request. raid5_run_ops only
  2084. * handles the case where compute block and postxor are requested
  2085. * simultaneously. If this is not the case then new writes need to be
  2086. * held off until the compute completes.
  2087. */
  2088. if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
  2089. (s->locked == 0 && (rcw == 0 || rmw == 0) &&
  2090. !test_bit(STRIPE_BIT_DELAY, &sh->state)))
  2091. schedule_reconstruction5(sh, s, rcw == 0, 0);
  2092. }
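/*
 * handle_stripe_dirtying6 - RAID-6 writes always take the reconstruct-write
 * path: read (or, for failed devices, compute) every block that is not
 * being fully overwritten, then regenerate P and Q with compute_parity6()
 * and mark all locked blocks Wantwrite.
 */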
  2093. static void handle_stripe_dirtying6(raid5_conf_t *conf,
  2094. struct stripe_head *sh, struct stripe_head_state *s,
  2095. struct r6_state *r6s, int disks)
  2096. {
  2097. int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i;
  2098. int qd_idx = r6s->qd_idx;
  2099. for (i = disks; i--; ) {
  2100. struct r5dev *dev = &sh->dev[i];
  2101. /* Would I have to read this buffer for reconstruct_write */
  2102. if (!test_bit(R5_OVERWRITE, &dev->flags)
  2103. && i != pd_idx && i != qd_idx
2104. && !test_bit(R5_LOCKED, &dev->flags) &&
2105. !test_bit(R5_UPTODATE, &dev->flags)) {
  2107. if (test_bit(R5_Insync, &dev->flags)) rcw++;
  2108. else {
  2109. pr_debug("raid6: must_compute: "
  2110. "disk %d flags=%#lx\n", i, dev->flags);
  2111. must_compute++;
  2112. }
  2113. }
  2114. }
  2115. pr_debug("for sector %llu, rcw=%d, must_compute=%d\n",
  2116. (unsigned long long)sh->sector, rcw, must_compute);
  2117. set_bit(STRIPE_HANDLE, &sh->state);
  2118. if (rcw > 0)
  2119. /* want reconstruct write, but need to get some data */
  2120. for (i = disks; i--; ) {
  2121. struct r5dev *dev = &sh->dev[i];
  2122. if (!test_bit(R5_OVERWRITE, &dev->flags)
  2123. && !(s->failed == 0 && (i == pd_idx || i == qd_idx))
  2124. && !test_bit(R5_LOCKED, &dev->flags) &&
  2125. !test_bit(R5_UPTODATE, &dev->flags) &&
  2126. test_bit(R5_Insync, &dev->flags)) {
2127. if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
  2129. pr_debug("Read_old stripe %llu "
  2130. "block %d for Reconstruct\n",
  2131. (unsigned long long)sh->sector, i);
  2132. set_bit(R5_LOCKED, &dev->flags);
  2133. set_bit(R5_Wantread, &dev->flags);
  2134. s->locked++;
  2135. } else {
  2136. pr_debug("Request delayed stripe %llu "
  2137. "block %d for Reconstruct\n",
  2138. (unsigned long long)sh->sector, i);
  2139. set_bit(STRIPE_DELAYED, &sh->state);
  2140. set_bit(STRIPE_HANDLE, &sh->state);
  2141. }
  2142. }
  2143. }
  2144. /* now if nothing is locked, and if we have enough data, we can start a
  2145. * write request
  2146. */
  2147. if (s->locked == 0 && rcw == 0 &&
  2148. !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
  2149. if (must_compute > 0) {
  2150. /* We have failed blocks and need to compute them */
  2151. switch (s->failed) {
  2152. case 0:
  2153. BUG();
  2154. case 1:
  2155. compute_block_1(sh, r6s->failed_num[0], 0);
  2156. break;
  2157. case 2:
  2158. compute_block_2(sh, r6s->failed_num[0],
  2159. r6s->failed_num[1]);
  2160. break;
  2161. default: /* This request should have been failed? */
  2162. BUG();
  2163. }
  2164. }
  2165. pr_debug("Computing parity for stripe %llu\n",
  2166. (unsigned long long)sh->sector);
  2167. compute_parity6(sh, RECONSTRUCT_WRITE);
  2168. /* now every locked buffer is ready to be written */
  2169. for (i = disks; i--; )
  2170. if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
  2171. pr_debug("Writing stripe %llu block %d\n",
  2172. (unsigned long long)sh->sector, i);
  2173. s->locked++;
  2174. set_bit(R5_Wantwrite, &sh->dev[i].flags);
  2175. }
  2176. if (s->locked == disks)
  2177. if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
  2178. atomic_inc(&conf->pending_full_writes);
  2179. /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
  2180. set_bit(STRIPE_INSYNC, &sh->state);
  2181. if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
  2182. atomic_dec(&conf->preread_active_stripes);
  2183. if (atomic_read(&conf->preread_active_stripes) <
  2184. IO_THRESHOLD)
  2185. md_wakeup_thread(conf->mddev->thread);
  2186. }
  2187. }
  2188. }
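/*
 * handle_parity_checks5 - RAID-5 check/repair state machine: start an
 * asynchronous parity check and, if a mismatch is found, either only count
 * it (when MD_RECOVERY_CHECK is set) or compute the correct parity and
 * schedule it for rewrite.
 */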
  2189. static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
  2190. struct stripe_head_state *s, int disks)
  2191. {
  2192. struct r5dev *dev = NULL;
  2193. set_bit(STRIPE_HANDLE, &sh->state);
  2194. switch (sh->check_state) {
  2195. case check_state_idle:
  2196. /* start a new check operation if there are no failures */
  2197. if (s->failed == 0) {
  2198. BUG_ON(s->uptodate != disks);
  2199. sh->check_state = check_state_run;
  2200. set_bit(STRIPE_OP_CHECK, &s->ops_request);
  2201. clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
  2202. s->uptodate--;
  2203. break;
  2204. }
  2205. dev = &sh->dev[s->failed_num];
  2206. /* fall through */
  2207. case check_state_compute_result:
  2208. sh->check_state = check_state_idle;
  2209. if (!dev)
  2210. dev = &sh->dev[sh->pd_idx];
  2211. /* check that a write has not made the stripe insync */
  2212. if (test_bit(STRIPE_INSYNC, &sh->state))
  2213. break;
  2214. /* either failed parity check, or recovery is happening */
  2215. BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
  2216. BUG_ON(s->uptodate != disks);
  2217. set_bit(R5_LOCKED, &dev->flags);
  2218. s->locked++;
  2219. set_bit(R5_Wantwrite, &dev->flags);
  2220. clear_bit(STRIPE_DEGRADED, &sh->state);
  2221. set_bit(STRIPE_INSYNC, &sh->state);
  2222. break;
  2223. case check_state_run:
  2224. break; /* we will be called again upon completion */
  2225. case check_state_check_result:
  2226. sh->check_state = check_state_idle;
  2227. /* if a failure occurred during the check operation, leave
  2228. * STRIPE_INSYNC not set and let the stripe be handled again
  2229. */
  2230. if (s->failed)
  2231. break;
  2232. /* handle a successful check operation, if parity is correct
  2233. * we are done. Otherwise update the mismatch count and repair
  2234. * parity if !MD_RECOVERY_CHECK
  2235. */
  2236. if (sh->ops.zero_sum_result == 0)
  2237. /* parity is correct (on disc,
  2238. * not in buffer any more)
  2239. */
  2240. set_bit(STRIPE_INSYNC, &sh->state);
  2241. else {
  2242. conf->mddev->resync_mismatches += STRIPE_SECTORS;
  2243. if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
  2244. /* don't try to repair!! */
  2245. set_bit(STRIPE_INSYNC, &sh->state);
  2246. else {
  2247. sh->check_state = check_state_compute_run;
  2248. set_bit(STRIPE_COMPUTE_RUN, &sh->state);
  2249. set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
  2250. set_bit(R5_Wantcompute,
  2251. &sh->dev[sh->pd_idx].flags);
  2252. sh->ops.target = sh->pd_idx;
  2253. s->uptodate++;
  2254. }
  2255. }
  2256. break;
  2257. case check_state_compute_run:
  2258. break;
  2259. default:
  2260. printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
  2261. __func__, sh->check_state,
  2262. (unsigned long long) sh->sector);
  2263. BUG();
  2264. }
  2265. }
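/*
 * handle_parity_checks6 - synchronous RAID-6 check/repair.  P is verified
 * by xor-ing the data into its page and testing for zero, Q by regenerating
 * it and comparing against a copy saved in tmp_page; blocks on failed
 * drives and any stale P/Q are then scheduled for rewrite.
 */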
  2266. static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
  2267. struct stripe_head_state *s,
  2268. struct r6_state *r6s, struct page *tmp_page,
  2269. int disks)
  2270. {
  2271. int update_p = 0, update_q = 0;
  2272. struct r5dev *dev;
  2273. int pd_idx = sh->pd_idx;
  2274. int qd_idx = r6s->qd_idx;
  2275. set_bit(STRIPE_HANDLE, &sh->state);
  2276. BUG_ON(s->failed > 2);
  2277. BUG_ON(s->uptodate < disks);
  2278. /* Want to check and possibly repair P and Q.
  2279. * However there could be one 'failed' device, in which
  2280. * case we can only check one of them, possibly using the
  2281. * other to generate missing data
  2282. */
  2283. /* If !tmp_page, we cannot do the calculations,
  2284. * but as we have set STRIPE_HANDLE, we will soon be called
2285. * by handle_stripe with a tmp_page - just wait until then.
  2286. */
  2287. if (tmp_page) {
  2288. if (s->failed == r6s->q_failed) {
  2289. /* The only possible failed device holds 'Q', so it
  2290. * makes sense to check P (If anything else were failed,
  2291. * we would have used P to recreate it).
  2292. */
  2293. compute_block_1(sh, pd_idx, 1);
  2294. if (!page_is_zero(sh->dev[pd_idx].page)) {
  2295. compute_block_1(sh, pd_idx, 0);
  2296. update_p = 1;
  2297. }
  2298. }
  2299. if (!r6s->q_failed && s->failed < 2) {
  2300. /* q is not failed, and we didn't use it to generate
  2301. * anything, so it makes sense to check it
  2302. */
  2303. memcpy(page_address(tmp_page),
  2304. page_address(sh->dev[qd_idx].page),
  2305. STRIPE_SIZE);
  2306. compute_parity6(sh, UPDATE_PARITY);
  2307. if (memcmp(page_address(tmp_page),
  2308. page_address(sh->dev[qd_idx].page),
  2309. STRIPE_SIZE) != 0) {
  2310. clear_bit(STRIPE_INSYNC, &sh->state);
  2311. update_q = 1;
  2312. }
  2313. }
  2314. if (update_p || update_q) {
  2315. conf->mddev->resync_mismatches += STRIPE_SECTORS;
  2316. if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
  2317. /* don't try to repair!! */
  2318. update_p = update_q = 0;
  2319. }
  2320. /* now write out any block on a failed drive,
  2321. * or P or Q if they need it
  2322. */
  2323. if (s->failed == 2) {
  2324. dev = &sh->dev[r6s->failed_num[1]];
  2325. s->locked++;
  2326. set_bit(R5_LOCKED, &dev->flags);
  2327. set_bit(R5_Wantwrite, &dev->flags);
  2328. }
  2329. if (s->failed >= 1) {
  2330. dev = &sh->dev[r6s->failed_num[0]];
  2331. s->locked++;
  2332. set_bit(R5_LOCKED, &dev->flags);
  2333. set_bit(R5_Wantwrite, &dev->flags);
  2334. }
  2335. if (update_p) {
  2336. dev = &sh->dev[pd_idx];
  2337. s->locked++;
  2338. set_bit(R5_LOCKED, &dev->flags);
  2339. set_bit(R5_Wantwrite, &dev->flags);
  2340. }
  2341. if (update_q) {
  2342. dev = &sh->dev[qd_idx];
  2343. s->locked++;
  2344. set_bit(R5_LOCKED, &dev->flags);
  2345. set_bit(R5_Wantwrite, &dev->flags);
  2346. }
  2347. clear_bit(STRIPE_DEGRADED, &sh->state);
  2348. set_bit(STRIPE_INSYNC, &sh->state);
  2349. }
  2350. }
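/*
 * handle_stripe_expansion - this stripe is a reshape source: copy each data
 * block into the stripe that will hold it under the new geometry using
 * async_memcpy(), and flag a destination stripe as ready once all of its
 * blocks have been filled in.
 */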
  2351. static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
  2352. struct r6_state *r6s)
  2353. {
  2354. int i;
  2355. /* We have read all the blocks in this stripe and now we need to
  2356. * copy some of them into a target stripe for expand.
  2357. */
  2358. struct dma_async_tx_descriptor *tx = NULL;
  2359. clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
  2360. for (i = 0; i < sh->disks; i++)
  2361. if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) {
  2362. int dd_idx, j;
  2363. struct stripe_head *sh2;
  2364. sector_t bn = compute_blocknr(sh, i);
  2365. sector_t s = raid5_compute_sector(conf, bn, 0,
  2366. &dd_idx, NULL);
  2367. sh2 = get_active_stripe(conf, s, 0, 1);
  2368. if (sh2 == NULL)
  2369. /* so far only the early blocks of this stripe
  2370. * have been requested. When later blocks
  2371. * get requested, we will try again
  2372. */
  2373. continue;
  2374. if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
  2375. test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
  2376. /* must have already done this block */
  2377. release_stripe(sh2);
  2378. continue;
  2379. }
  2380. /* place all the copies on one channel */
  2381. tx = async_memcpy(sh2->dev[dd_idx].page,
  2382. sh->dev[i].page, 0, 0, STRIPE_SIZE,
  2383. ASYNC_TX_DEP_ACK, tx, NULL, NULL);
  2384. set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
  2385. set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
  2386. for (j = 0; j < conf->raid_disks; j++)
  2387. if (j != sh2->pd_idx &&
  2388. (!r6s || j != sh2->qd_idx) &&
  2389. !test_bit(R5_Expanded, &sh2->dev[j].flags))
  2390. break;
  2391. if (j == conf->raid_disks) {
  2392. set_bit(STRIPE_EXPAND_READY, &sh2->state);
  2393. set_bit(STRIPE_HANDLE, &sh2->state);
  2394. }
  2395. release_stripe(sh2);
  2396. }
  2397. /* done submitting copies, wait for them to complete */
  2398. if (tx) {
  2399. async_tx_ack(tx);
  2400. dma_wait_for_async_tx(tx);
  2401. }
  2402. }
  2403. /*
  2404. * handle_stripe - do things to a stripe.
  2405. *
  2406. * We lock the stripe and then examine the state of various bits
  2407. * to see what needs to be done.
  2408. * Possible results:
2409. * return some read requests which now have data
  2410. * return some write requests which are safely on disc
  2411. * schedule a read on some buffers
  2412. * schedule a write of some buffers
  2413. * return confirmation of parity correctness
  2414. *
  2415. * buffers are taken off read_list or write_list, and bh_cache buffers
  2416. * get BH_Lock set before the stripe lock is released.
  2417. *
  2418. */
  2419. static bool handle_stripe5(struct stripe_head *sh)
  2420. {
  2421. raid5_conf_t *conf = sh->raid_conf;
  2422. int disks = sh->disks, i;
  2423. struct bio *return_bi = NULL;
  2424. struct stripe_head_state s;
  2425. struct r5dev *dev;
  2426. mdk_rdev_t *blocked_rdev = NULL;
  2427. int prexor;
  2428. memset(&s, 0, sizeof(s));
  2429. pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
  2430. "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
  2431. atomic_read(&sh->count), sh->pd_idx, sh->check_state,
  2432. sh->reconstruct_state);
  2433. spin_lock(&sh->lock);
  2434. clear_bit(STRIPE_HANDLE, &sh->state);
  2435. clear_bit(STRIPE_DELAYED, &sh->state);
  2436. s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
  2437. s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
  2438. s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
  2439. /* Now to look around and see what can be done */
  2440. rcu_read_lock();
  2441. for (i=disks; i--; ) {
  2442. mdk_rdev_t *rdev;
  2443. struct r5dev *dev = &sh->dev[i];
  2444. clear_bit(R5_Insync, &dev->flags);
  2445. pr_debug("check %d: state 0x%lx toread %p read %p write %p "
  2446. "written %p\n", i, dev->flags, dev->toread, dev->read,
  2447. dev->towrite, dev->written);
  2448. /* maybe we can request a biofill operation
  2449. *
  2450. * new wantfill requests are only permitted while
  2451. * ops_complete_biofill is guaranteed to be inactive
  2452. */
  2453. if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
  2454. !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
  2455. set_bit(R5_Wantfill, &dev->flags);
  2456. /* now count some things */
  2457. if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
  2458. if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
  2459. if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;
  2460. if (test_bit(R5_Wantfill, &dev->flags))
  2461. s.to_fill++;
  2462. else if (dev->toread)
  2463. s.to_read++;
  2464. if (dev->towrite) {
  2465. s.to_write++;
  2466. if (!test_bit(R5_OVERWRITE, &dev->flags))
  2467. s.non_overwrite++;
  2468. }
  2469. if (dev->written)
  2470. s.written++;
  2471. rdev = rcu_dereference(conf->disks[i].rdev);
  2472. if (blocked_rdev == NULL &&
  2473. rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
  2474. blocked_rdev = rdev;
  2475. atomic_inc(&rdev->nr_pending);
  2476. }
  2477. if (!rdev || !test_bit(In_sync, &rdev->flags)) {
  2478. /* The ReadError flag will just be confusing now */
  2479. clear_bit(R5_ReadError, &dev->flags);
  2480. clear_bit(R5_ReWrite, &dev->flags);
  2481. }
  2482. if (!rdev || !test_bit(In_sync, &rdev->flags)
  2483. || test_bit(R5_ReadError, &dev->flags)) {
  2484. s.failed++;
  2485. s.failed_num = i;
  2486. } else
  2487. set_bit(R5_Insync, &dev->flags);
  2488. }
  2489. rcu_read_unlock();
  2490. if (unlikely(blocked_rdev)) {
  2491. if (s.syncing || s.expanding || s.expanded ||
  2492. s.to_write || s.written) {
  2493. set_bit(STRIPE_HANDLE, &sh->state);
  2494. goto unlock;
  2495. }
  2496. /* There is nothing for the blocked_rdev to block */
  2497. rdev_dec_pending(blocked_rdev, conf->mddev);
  2498. blocked_rdev = NULL;
  2499. }
  2500. if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
  2501. set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
  2502. set_bit(STRIPE_BIOFILL_RUN, &sh->state);
  2503. }
  2504. pr_debug("locked=%d uptodate=%d to_read=%d"
  2505. " to_write=%d failed=%d failed_num=%d\n",
  2506. s.locked, s.uptodate, s.to_read, s.to_write,
  2507. s.failed, s.failed_num);
  2508. /* check if the array has lost two devices and, if so, some requests might
  2509. * need to be failed
  2510. */
  2511. if (s.failed > 1 && s.to_read+s.to_write+s.written)
  2512. handle_failed_stripe(conf, sh, &s, disks, &return_bi);
  2513. if (s.failed > 1 && s.syncing) {
  2514. md_done_sync(conf->mddev, STRIPE_SECTORS,0);
  2515. clear_bit(STRIPE_SYNCING, &sh->state);
  2516. s.syncing = 0;
  2517. }
  2518. /* might be able to return some write requests if the parity block
  2519. * is safe, or on a failed drive
  2520. */
  2521. dev = &sh->dev[sh->pd_idx];
  2522. if ( s.written &&
  2523. ((test_bit(R5_Insync, &dev->flags) &&
  2524. !test_bit(R5_LOCKED, &dev->flags) &&
  2525. test_bit(R5_UPTODATE, &dev->flags)) ||
  2526. (s.failed == 1 && s.failed_num == sh->pd_idx)))
  2527. handle_stripe_clean_event(conf, sh, disks, &return_bi);
  2528. /* Now we might consider reading some blocks, either to check/generate
  2529. * parity, or to satisfy requests
  2530. * or to load a block that is being partially written.
  2531. */
  2532. if (s.to_read || s.non_overwrite ||
  2533. (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
  2534. handle_stripe_fill5(sh, &s, disks);
  2535. /* Now we check to see if any write operations have recently
  2536. * completed
  2537. */
  2538. prexor = 0;
  2539. if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
  2540. prexor = 1;
  2541. if (sh->reconstruct_state == reconstruct_state_drain_result ||
  2542. sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
  2543. sh->reconstruct_state = reconstruct_state_idle;
  2544. /* All the 'written' buffers and the parity block are ready to
  2545. * be written back to disk
  2546. */
  2547. BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
  2548. for (i = disks; i--; ) {
  2549. dev = &sh->dev[i];
  2550. if (test_bit(R5_LOCKED, &dev->flags) &&
  2551. (i == sh->pd_idx || dev->written)) {
  2552. pr_debug("Writing block %d\n", i);
  2553. set_bit(R5_Wantwrite, &dev->flags);
  2554. if (prexor)
  2555. continue;
  2556. if (!test_bit(R5_Insync, &dev->flags) ||
  2557. (i == sh->pd_idx && s.failed == 0))
  2558. set_bit(STRIPE_INSYNC, &sh->state);
  2559. }
  2560. }
  2561. if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
  2562. atomic_dec(&conf->preread_active_stripes);
  2563. if (atomic_read(&conf->preread_active_stripes) <
  2564. IO_THRESHOLD)
  2565. md_wakeup_thread(conf->mddev->thread);
  2566. }
  2567. }
  2568. /* Now to consider new write requests and what else, if anything
  2569. * should be read. We do not handle new writes when:
  2570. * 1/ A 'write' operation (copy+xor) is already in flight.
  2571. * 2/ A 'check' operation is in flight, as it may clobber the parity
  2572. * block.
  2573. */
  2574. if (s.to_write && !sh->reconstruct_state && !sh->check_state)
  2575. handle_stripe_dirtying5(conf, sh, &s, disks);
  2576. /* maybe we need to check and possibly fix the parity for this stripe
  2577. * Any reads will already have been scheduled, so we just see if enough
  2578. * data is available. The parity check is held off while parity
  2579. * dependent operations are in flight.
  2580. */
  2581. if (sh->check_state ||
  2582. (s.syncing && s.locked == 0 &&
  2583. !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
  2584. !test_bit(STRIPE_INSYNC, &sh->state)))
  2585. handle_parity_checks5(conf, sh, &s, disks);
  2586. if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
  2587. md_done_sync(conf->mddev, STRIPE_SECTORS,1);
  2588. clear_bit(STRIPE_SYNCING, &sh->state);
  2589. }
  2590. /* If the failed drive is just a ReadError, then we might need to progress
  2591. * the repair/check process
  2592. */
  2593. if (s.failed == 1 && !conf->mddev->ro &&
  2594. test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
  2595. && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
  2596. && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
  2597. ) {
  2598. dev = &sh->dev[s.failed_num];
  2599. if (!test_bit(R5_ReWrite, &dev->flags)) {
  2600. set_bit(R5_Wantwrite, &dev->flags);
  2601. set_bit(R5_ReWrite, &dev->flags);
  2602. set_bit(R5_LOCKED, &dev->flags);
  2603. s.locked++;
  2604. } else {
  2605. /* let's read it back */
  2606. set_bit(R5_Wantread, &dev->flags);
  2607. set_bit(R5_LOCKED, &dev->flags);
  2608. s.locked++;
  2609. }
  2610. }
  2611. /* Finish reconstruct operations initiated by the expansion process */
  2612. if (sh->reconstruct_state == reconstruct_state_result) {
  2613. sh->reconstruct_state = reconstruct_state_idle;
  2614. clear_bit(STRIPE_EXPANDING, &sh->state);
  2615. for (i = conf->raid_disks; i--; ) {
  2616. set_bit(R5_Wantwrite, &sh->dev[i].flags);
  2617. set_bit(R5_LOCKED, &sh->dev[i].flags);
  2618. s.locked++;
  2619. }
  2620. }
  2621. if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
  2622. !sh->reconstruct_state) {
  2623. /* Need to write out all blocks after computing parity */
  2624. sh->disks = conf->raid_disks;
  2625. stripe_set_idx(sh->sector, conf, 0, sh);
  2626. schedule_reconstruction5(sh, &s, 1, 1);
  2627. } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
  2628. clear_bit(STRIPE_EXPAND_READY, &sh->state);
  2629. atomic_dec(&conf->reshape_stripes);
  2630. wake_up(&conf->wait_for_overlap);
  2631. md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
  2632. }
  2633. if (s.expanding && s.locked == 0 &&
  2634. !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
  2635. handle_stripe_expansion(conf, sh, NULL);
  2636. unlock:
  2637. spin_unlock(&sh->lock);
  2638. /* wait for this device to become unblocked */
  2639. if (unlikely(blocked_rdev))
  2640. md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
  2641. if (s.ops_request)
  2642. raid5_run_ops(sh, s.ops_request);
  2643. ops_run_io(sh, &s);
  2644. return_io(return_bi);
  2645. return blocked_rdev == NULL;
  2646. }
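/*
 * handle_stripe6() below is the RAID-6 counterpart of handle_stripe5():
 * it tracks up to two failed devices (r6s.failed_num[0..1]) and two
 * parity blocks, P at pd_idx and Q at qd_idx.  Unlike the RAID-5 path
 * above it still drives parity updates through the synchronous
 * compute_parity6() path (see e.g. the expansion case below) rather than
 * the async reconstruct_state machinery.
 */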
  2647. static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
  2648. {
  2649. raid5_conf_t *conf = sh->raid_conf;
  2650. int disks = sh->disks;
  2651. struct bio *return_bi = NULL;
  2652. int i, pd_idx = sh->pd_idx;
  2653. struct stripe_head_state s;
  2654. struct r6_state r6s;
  2655. struct r5dev *dev, *pdev, *qdev;
  2656. mdk_rdev_t *blocked_rdev = NULL;
  2657. r6s.qd_idx = sh->qd_idx;
  2658. pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
  2659. "pd_idx=%d, qd_idx=%d\n",
  2660. (unsigned long long)sh->sector, sh->state,
  2661. atomic_read(&sh->count), pd_idx, r6s.qd_idx);
  2662. memset(&s, 0, sizeof(s));
  2663. spin_lock(&sh->lock);
  2664. clear_bit(STRIPE_HANDLE, &sh->state);
  2665. clear_bit(STRIPE_DELAYED, &sh->state);
  2666. s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
  2667. s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
  2668. s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
  2669. /* Now to look around and see what can be done */
  2670. rcu_read_lock();
  2671. for (i=disks; i--; ) {
  2672. mdk_rdev_t *rdev;
  2673. dev = &sh->dev[i];
  2674. clear_bit(R5_Insync, &dev->flags);
  2675. pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
  2676. i, dev->flags, dev->toread, dev->towrite, dev->written);
  2677. /* maybe we can reply to a read */
  2678. if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
  2679. struct bio *rbi, *rbi2;
  2680. pr_debug("Return read for disc %d\n", i);
  2681. spin_lock_irq(&conf->device_lock);
  2682. rbi = dev->toread;
  2683. dev->toread = NULL;
  2684. if (test_and_clear_bit(R5_Overlap, &dev->flags))
  2685. wake_up(&conf->wait_for_overlap);
  2686. spin_unlock_irq(&conf->device_lock);
  2687. while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
  2688. copy_data(0, rbi, dev->page, dev->sector);
  2689. rbi2 = r5_next_bio(rbi, dev->sector);
  2690. spin_lock_irq(&conf->device_lock);
  2691. if (!raid5_dec_bi_phys_segments(rbi)) {
  2692. rbi->bi_next = return_bi;
  2693. return_bi = rbi;
  2694. }
  2695. spin_unlock_irq(&conf->device_lock);
  2696. rbi = rbi2;
  2697. }
  2698. }
  2699. /* now count some things */
  2700. if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
  2701. if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
  2702. if (dev->toread)
  2703. s.to_read++;
  2704. if (dev->towrite) {
  2705. s.to_write++;
  2706. if (!test_bit(R5_OVERWRITE, &dev->flags))
  2707. s.non_overwrite++;
  2708. }
  2709. if (dev->written)
  2710. s.written++;
  2711. rdev = rcu_dereference(conf->disks[i].rdev);
  2712. if (blocked_rdev == NULL &&
  2713. rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
  2714. blocked_rdev = rdev;
  2715. atomic_inc(&rdev->nr_pending);
  2716. }
  2717. if (!rdev || !test_bit(In_sync, &rdev->flags)) {
  2718. /* The ReadError flag will just be confusing now */
  2719. clear_bit(R5_ReadError, &dev->flags);
  2720. clear_bit(R5_ReWrite, &dev->flags);
  2721. }
  2722. if (!rdev || !test_bit(In_sync, &rdev->flags)
  2723. || test_bit(R5_ReadError, &dev->flags)) {
  2724. if (s.failed < 2)
  2725. r6s.failed_num[s.failed] = i;
  2726. s.failed++;
  2727. } else
  2728. set_bit(R5_Insync, &dev->flags);
  2729. }
  2730. rcu_read_unlock();
  2731. if (unlikely(blocked_rdev)) {
  2732. if (s.syncing || s.expanding || s.expanded ||
  2733. s.to_write || s.written) {
  2734. set_bit(STRIPE_HANDLE, &sh->state);
  2735. goto unlock;
  2736. }
  2737. /* There is nothing for the blocked_rdev to block */
  2738. rdev_dec_pending(blocked_rdev, conf->mddev);
  2739. blocked_rdev = NULL;
  2740. }
  2741. pr_debug("locked=%d uptodate=%d to_read=%d"
  2742. " to_write=%d failed=%d failed_num=%d,%d\n",
  2743. s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
  2744. r6s.failed_num[0], r6s.failed_num[1]);
  2745. /* check if the array has lost >2 devices and, if so, some requests
  2746. * might need to be failed
  2747. */
  2748. if (s.failed > 2 && s.to_read+s.to_write+s.written)
  2749. handle_failed_stripe(conf, sh, &s, disks, &return_bi);
  2750. if (s.failed > 2 && s.syncing) {
  2751. md_done_sync(conf->mddev, STRIPE_SECTORS,0);
  2752. clear_bit(STRIPE_SYNCING, &sh->state);
  2753. s.syncing = 0;
  2754. }
  2755. /*
  2756. * might be able to return some write requests if the parity blocks
  2757. * are safe, or on a failed drive
  2758. */
  2759. pdev = &sh->dev[pd_idx];
  2760. r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx)
  2761. || (s.failed >= 2 && r6s.failed_num[1] == pd_idx);
  2762. qdev = &sh->dev[r6s.qd_idx];
  2763. r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == r6s.qd_idx)
  2764. || (s.failed >= 2 && r6s.failed_num[1] == r6s.qd_idx);
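/*
 * p_failed/q_failed above note whether the P or Q parity device is one
 * of the (at most two) failed devices; written data can only be
 * returned below if each parity block is either on a failed drive or
 * is in-sync, unlocked and up to date.
 */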
  2765. if ( s.written &&
  2766. ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
  2767. && !test_bit(R5_LOCKED, &pdev->flags)
  2768. && test_bit(R5_UPTODATE, &pdev->flags)))) &&
  2769. ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
  2770. && !test_bit(R5_LOCKED, &qdev->flags)
  2771. && test_bit(R5_UPTODATE, &qdev->flags)))))
  2772. handle_stripe_clean_event(conf, sh, disks, &return_bi);
  2773. /* Now we might consider reading some blocks, either to check/generate
  2774. * parity, or to satisfy requests
  2775. * or to load a block that is being partially written.
  2776. */
  2777. if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
  2778. (s.syncing && (s.uptodate < disks)) || s.expanding)
  2779. handle_stripe_fill6(sh, &s, &r6s, disks);
  2780. /* now to consider writing and what else, if anything should be read */
  2781. if (s.to_write)
  2782. handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);
  2783. /* maybe we need to check and possibly fix the parity for this stripe
  2784. * Any reads will already have been scheduled, so we just see if enough
  2785. * data is available
  2786. */
  2787. if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state))
  2788. handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks);
  2789. if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
  2790. md_done_sync(conf->mddev, STRIPE_SECTORS,1);
  2791. clear_bit(STRIPE_SYNCING, &sh->state);
  2792. }
  2793. /* If the failed drives are just a ReadError, then we might need
  2794. * to progress the repair/check process
  2795. */
  2796. if (s.failed <= 2 && !conf->mddev->ro)
  2797. for (i = 0; i < s.failed; i++) {
  2798. dev = &sh->dev[r6s.failed_num[i]];
  2799. if (test_bit(R5_ReadError, &dev->flags)
  2800. && !test_bit(R5_LOCKED, &dev->flags)
  2801. && test_bit(R5_UPTODATE, &dev->flags)
  2802. ) {
  2803. if (!test_bit(R5_ReWrite, &dev->flags)) {
  2804. set_bit(R5_Wantwrite, &dev->flags);
  2805. set_bit(R5_ReWrite, &dev->flags);
  2806. set_bit(R5_LOCKED, &dev->flags);
  2807. } else {
  2808. /* let's read it back */
  2809. set_bit(R5_Wantread, &dev->flags);
  2810. set_bit(R5_LOCKED, &dev->flags);
  2811. }
  2812. }
  2813. }
  2814. if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
  2815. /* Need to write out all blocks after computing P&Q */
  2816. sh->disks = conf->raid_disks;
  2817. stripe_set_idx(sh->sector, conf, 0, sh);
  2818. compute_parity6(sh, RECONSTRUCT_WRITE);
  2819. for (i = conf->raid_disks ; i-- ; ) {
  2820. set_bit(R5_LOCKED, &sh->dev[i].flags);
  2821. s.locked++;
  2822. set_bit(R5_Wantwrite, &sh->dev[i].flags);
  2823. }
  2824. clear_bit(STRIPE_EXPANDING, &sh->state);
  2825. } else if (s.expanded) {
  2826. clear_bit(STRIPE_EXPAND_READY, &sh->state);
  2827. atomic_dec(&conf->reshape_stripes);
  2828. wake_up(&conf->wait_for_overlap);
  2829. md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
  2830. }
  2831. if (s.expanding && s.locked == 0 &&
  2832. !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
  2833. handle_stripe_expansion(conf, sh, &r6s);
  2834. unlock:
  2835. spin_unlock(&sh->lock);
  2836. /* wait for this device to become unblocked */
  2837. if (unlikely(blocked_rdev))
  2838. md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
  2839. ops_run_io(sh, &s);
  2840. return_io(return_bi);
  2841. return blocked_rdev == NULL;
  2842. }
  2843. /* returns true if the stripe was handled */
  2844. static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page)
  2845. {
  2846. if (sh->raid_conf->level == 6)
  2847. return handle_stripe6(sh, tmp_page);
  2848. else
  2849. return handle_stripe5(sh);
  2850. }
  2851. static void raid5_activate_delayed(raid5_conf_t *conf)
  2852. {
  2853. if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
  2854. while (!list_empty(&conf->delayed_list)) {
  2855. struct list_head *l = conf->delayed_list.next;
  2856. struct stripe_head *sh;
  2857. sh = list_entry(l, struct stripe_head, lru);
  2858. list_del_init(l);
  2859. clear_bit(STRIPE_DELAYED, &sh->state);
  2860. if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
  2861. atomic_inc(&conf->preread_active_stripes);
  2862. list_add_tail(&sh->lru, &conf->hold_list);
  2863. }
  2864. } else
  2865. blk_plug_device(conf->mddev->queue);
  2866. }
  2867. static void activate_bit_delay(raid5_conf_t *conf)
  2868. {
  2869. /* device_lock is held */
  2870. struct list_head head;
  2871. list_add(&head, &conf->bitmap_list);
  2872. list_del_init(&conf->bitmap_list);
  2873. while (!list_empty(&head)) {
  2874. struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
  2875. list_del_init(&sh->lru);
  2876. atomic_inc(&sh->count);
  2877. __release_stripe(conf, sh);
  2878. }
  2879. }
  2880. static void unplug_slaves(mddev_t *mddev)
  2881. {
  2882. raid5_conf_t *conf = mddev_to_conf(mddev);
  2883. int i;
  2884. rcu_read_lock();
  2885. for (i=0; i<mddev->raid_disks; i++) {
  2886. mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
  2887. if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
  2888. struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
  2889. atomic_inc(&rdev->nr_pending);
  2890. rcu_read_unlock();
  2891. blk_unplug(r_queue);
  2892. rdev_dec_pending(rdev, mddev);
  2893. rcu_read_lock();
  2894. }
  2895. }
  2896. rcu_read_unlock();
  2897. }
  2898. static void raid5_unplug_device(struct request_queue *q)
  2899. {
  2900. mddev_t *mddev = q->queuedata;
  2901. raid5_conf_t *conf = mddev_to_conf(mddev);
  2902. unsigned long flags;
  2903. spin_lock_irqsave(&conf->device_lock, flags);
  2904. if (blk_remove_plug(q)) {
  2905. conf->seq_flush++;
  2906. raid5_activate_delayed(conf);
  2907. }
  2908. md_wakeup_thread(mddev->thread);
  2909. spin_unlock_irqrestore(&conf->device_lock, flags);
  2910. unplug_slaves(mddev);
  2911. }
  2912. static int raid5_congested(void *data, int bits)
  2913. {
  2914. mddev_t *mddev = data;
  2915. raid5_conf_t *conf = mddev_to_conf(mddev);
  2916. /* No difference between reads and writes. Just check
  2917. * how busy the stripe_cache is
  2918. */
  2919. if (conf->inactive_blocked)
  2920. return 1;
  2921. if (conf->quiesce)
  2922. return 1;
  2923. if (list_empty_careful(&conf->inactive_list))
  2924. return 1;
  2925. return 0;
  2926. }
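/*
 * Note: the congested callback above reports the device as busy whenever
 * the stripe cache is exhausted (inactive_blocked), the array is being
 * quiesced, or no inactive stripes are left to hand out.
 */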
  2927. /* We want read requests to align with chunks where possible,
  2928. * but write requests don't need to.
  2929. */
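/*
 * Illustrative example: with a 64 KiB chunk (chunk_sectors == 128) and a
 * read bio whose start offset within the chunk plus its current length
 * comes to 120 sectors, 'max' below evaluates to (128 - 120) << 9 ==
 * 4 KiB, so at most 4 KiB more may be merged before the chunk boundary.
 * An empty bio (bio_sectors == 0) is always allowed its first bio_vec in
 * full, even if that exceeds 'max', since it cannot be made any smaller.
 */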
  2930. static int raid5_mergeable_bvec(struct request_queue *q,
  2931. struct bvec_merge_data *bvm,
  2932. struct bio_vec *biovec)
  2933. {
  2934. mddev_t *mddev = q->queuedata;
  2935. sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
  2936. int max;
  2937. unsigned int chunk_sectors = mddev->chunk_size >> 9;
  2938. unsigned int bio_sectors = bvm->bi_size >> 9;
  2939. if ((bvm->bi_rw & 1) == WRITE)
  2940. return biovec->bv_len; /* always allow writes to be mergeable */
  2941. max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
  2942. if (max < 0) max = 0;
  2943. if (max <= biovec->bv_len && bio_sectors == 0)
  2944. return biovec->bv_len;
  2945. else
  2946. return max;
  2947. }
  2948. static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
  2949. {
  2950. sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
  2951. unsigned int chunk_sectors = mddev->chunk_size >> 9;
  2952. unsigned int bio_sectors = bio->bi_size >> 9;
  2953. return chunk_sectors >=
  2954. ((sector & (chunk_sectors - 1)) + bio_sectors);
  2955. }
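/*
 * Example: with chunk_sectors == 128, a 16-sector bio starting 120
 * sectors into a chunk fails the test above (120 + 16 > 128) and so
 * cannot be serviced as a single chunk-aligned read.
 */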
  2956. /*
  2957. * add bio to the retry LIFO ( in O(1) ... we are in interrupt )
  2958. * later sampled by raid5d.
  2959. */
  2960. static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf)
  2961. {
  2962. unsigned long flags;
  2963. spin_lock_irqsave(&conf->device_lock, flags);
  2964. bi->bi_next = conf->retry_read_aligned_list;
  2965. conf->retry_read_aligned_list = bi;
  2966. spin_unlock_irqrestore(&conf->device_lock, flags);
  2967. md_wakeup_thread(conf->mddev->thread);
  2968. }
  2969. static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
  2970. {
  2971. struct bio *bi;
  2972. bi = conf->retry_read_aligned;
  2973. if (bi) {
  2974. conf->retry_read_aligned = NULL;
  2975. return bi;
  2976. }
  2977. bi = conf->retry_read_aligned_list;
  2978. if(bi) {
  2979. conf->retry_read_aligned_list = bi->bi_next;
  2980. bi->bi_next = NULL;
  2981. /*
2982. * this sets the active stripe count to 1 and the processed
2983. * stripe count to zero (upper 8 bits)
  2984. */
  2985. bi->bi_phys_segments = 1; /* biased count of active stripes */
  2986. }
  2987. return bi;
  2988. }
  2989. /*
  2990. * The "raid5_align_endio" should check if the read succeeded and if it
  2991. * did, call bio_endio on the original bio (having bio_put the new bio
  2992. * first).
2993. * If the read failed, hand the original bio back for a retry via add_bio_to_retry().
  2994. */
  2995. static void raid5_align_endio(struct bio *bi, int error)
  2996. {
  2997. struct bio* raid_bi = bi->bi_private;
  2998. mddev_t *mddev;
  2999. raid5_conf_t *conf;
  3000. int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
  3001. mdk_rdev_t *rdev;
  3002. bio_put(bi);
  3003. mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
  3004. conf = mddev_to_conf(mddev);
  3005. rdev = (void*)raid_bi->bi_next;
  3006. raid_bi->bi_next = NULL;
  3007. rdev_dec_pending(rdev, conf->mddev);
  3008. if (!error && uptodate) {
  3009. bio_endio(raid_bi, 0);
  3010. if (atomic_dec_and_test(&conf->active_aligned_reads))
  3011. wake_up(&conf->wait_for_stripe);
  3012. return;
  3013. }
  3014. pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
  3015. add_bio_to_retry(raid_bi, conf);
  3016. }
  3017. static int bio_fits_rdev(struct bio *bi)
  3018. {
  3019. struct request_queue *q = bdev_get_queue(bi->bi_bdev);
  3020. if ((bi->bi_size>>9) > q->max_sectors)
  3021. return 0;
  3022. blk_recount_segments(q, bi);
  3023. if (bi->bi_phys_segments > q->max_phys_segments)
  3024. return 0;
  3025. if (q->merge_bvec_fn)
  3026. /* it's too hard to apply the merge_bvec_fn at this stage,
3027. * so just give up
  3028. */
  3029. return 0;
  3030. return 1;
  3031. }
  3032. static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
  3033. {
  3034. mddev_t *mddev = q->queuedata;
  3035. raid5_conf_t *conf = mddev_to_conf(mddev);
  3036. unsigned int dd_idx;
  3037. struct bio* align_bi;
  3038. mdk_rdev_t *rdev;
  3039. if (!in_chunk_boundary(mddev, raid_bio)) {
  3040. pr_debug("chunk_aligned_read : non aligned\n");
  3041. return 0;
  3042. }
  3043. /*
  3044. * use bio_clone to make a copy of the bio
  3045. */
  3046. align_bi = bio_clone(raid_bio, GFP_NOIO);
  3047. if (!align_bi)
  3048. return 0;
  3049. /*
  3050. * set bi_end_io to a new function, and set bi_private to the
  3051. * original bio.
  3052. */
  3053. align_bi->bi_end_io = raid5_align_endio;
  3054. align_bi->bi_private = raid_bio;
  3055. /*
  3056. * compute position
  3057. */
  3058. align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
  3059. 0,
  3060. &dd_idx, NULL);
  3061. rcu_read_lock();
  3062. rdev = rcu_dereference(conf->disks[dd_idx].rdev);
  3063. if (rdev && test_bit(In_sync, &rdev->flags)) {
  3064. atomic_inc(&rdev->nr_pending);
  3065. rcu_read_unlock();
  3066. raid_bio->bi_next = (void*)rdev;
  3067. align_bi->bi_bdev = rdev->bdev;
  3068. align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
  3069. align_bi->bi_sector += rdev->data_offset;
  3070. if (!bio_fits_rdev(align_bi)) {
  3071. /* too big in some way */
  3072. bio_put(align_bi);
  3073. rdev_dec_pending(rdev, mddev);
  3074. return 0;
  3075. }
  3076. spin_lock_irq(&conf->device_lock);
  3077. wait_event_lock_irq(conf->wait_for_stripe,
  3078. conf->quiesce == 0,
  3079. conf->device_lock, /* nothing */);
  3080. atomic_inc(&conf->active_aligned_reads);
  3081. spin_unlock_irq(&conf->device_lock);
  3082. generic_make_request(align_bi);
  3083. return 1;
  3084. } else {
  3085. rcu_read_unlock();
  3086. bio_put(align_bi);
  3087. return 0;
  3088. }
  3089. }
  3090. /* __get_priority_stripe - get the next stripe to process
  3091. *
  3092. * Full stripe writes are allowed to pass preread active stripes up until
  3093. * the bypass_threshold is exceeded. In general the bypass_count
  3094. * increments when the handle_list is handled before the hold_list; however, it
3095. * will not be incremented when STRIPE_IO_STARTED is sampled as set, signifying a
  3096. * stripe with in flight i/o. The bypass_count will be reset when the
  3097. * head of the hold_list has changed, i.e. the head was promoted to the
  3098. * handle_list.
  3099. */
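/*
 * Illustration: each time a stripe is taken from handle_list while the
 * head of hold_list is unchanged, bypass_count is bumped (unless the
 * chosen stripe already has I/O in flight); once handle_list drains, a
 * hold_list stripe may be taken even though full-stripe writes are
 * still pending, provided bypass_count has exceeded bypass_threshold.
 */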
  3100. static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
  3101. {
  3102. struct stripe_head *sh;
  3103. pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
  3104. __func__,
  3105. list_empty(&conf->handle_list) ? "empty" : "busy",
  3106. list_empty(&conf->hold_list) ? "empty" : "busy",
  3107. atomic_read(&conf->pending_full_writes), conf->bypass_count);
  3108. if (!list_empty(&conf->handle_list)) {
  3109. sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
  3110. if (list_empty(&conf->hold_list))
  3111. conf->bypass_count = 0;
  3112. else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
  3113. if (conf->hold_list.next == conf->last_hold)
  3114. conf->bypass_count++;
  3115. else {
  3116. conf->last_hold = conf->hold_list.next;
  3117. conf->bypass_count -= conf->bypass_threshold;
  3118. if (conf->bypass_count < 0)
  3119. conf->bypass_count = 0;
  3120. }
  3121. }
  3122. } else if (!list_empty(&conf->hold_list) &&
  3123. ((conf->bypass_threshold &&
  3124. conf->bypass_count > conf->bypass_threshold) ||
  3125. atomic_read(&conf->pending_full_writes) == 0)) {
  3126. sh = list_entry(conf->hold_list.next,
  3127. typeof(*sh), lru);
  3128. conf->bypass_count -= conf->bypass_threshold;
  3129. if (conf->bypass_count < 0)
  3130. conf->bypass_count = 0;
  3131. } else
  3132. return NULL;
  3133. list_del_init(&sh->lru);
  3134. atomic_inc(&sh->count);
  3135. BUG_ON(atomic_read(&sh->count) != 1);
  3136. return sh;
  3137. }
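/*
 * make_request() below splits each incoming bio into STRIPE_SECTORS
 * (one page) sized pieces, looks up the owning stripe with
 * get_active_stripe(), attaches the bio via add_stripe_bio() and marks
 * the stripe for handling; bi_phys_segments is overloaded as a count of
 * stripes that still reference the bio.
 */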
  3138. static int make_request(struct request_queue *q, struct bio * bi)
  3139. {
  3140. mddev_t *mddev = q->queuedata;
  3141. raid5_conf_t *conf = mddev_to_conf(mddev);
  3142. int dd_idx;
  3143. sector_t new_sector;
  3144. sector_t logical_sector, last_sector;
  3145. struct stripe_head *sh;
  3146. const int rw = bio_data_dir(bi);
  3147. int cpu, remaining;
  3148. if (unlikely(bio_barrier(bi))) {
  3149. bio_endio(bi, -EOPNOTSUPP);
  3150. return 0;
  3151. }
  3152. md_write_start(mddev, bi);
  3153. cpu = part_stat_lock();
  3154. part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
  3155. part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
  3156. bio_sectors(bi));
  3157. part_stat_unlock();
  3158. if (rw == READ &&
  3159. mddev->reshape_position == MaxSector &&
  3160. chunk_aligned_read(q,bi))
  3161. return 0;
  3162. logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
  3163. last_sector = bi->bi_sector + (bi->bi_size>>9);
  3164. bi->bi_next = NULL;
  3165. bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
  3166. for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
  3167. DEFINE_WAIT(w);
  3168. int disks, data_disks;
  3169. int previous;
  3170. retry:
  3171. previous = 0;
  3172. prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
  3173. if (likely(conf->expand_progress == MaxSector))
  3174. disks = conf->raid_disks;
  3175. else {
  3176. /* spinlock is needed as expand_progress may be
  3177. * 64bit on a 32bit platform, and so it might be
3178. * possible to see a half-updated value.
3179. * Of course expand_progress could change after
  3180. * the lock is dropped, so once we get a reference
  3181. * to the stripe that we think it is, we will have
  3182. * to check again.
  3183. */
  3184. spin_lock_irq(&conf->device_lock);
  3185. disks = conf->raid_disks;
  3186. if (logical_sector >= conf->expand_progress) {
  3187. disks = conf->previous_raid_disks;
  3188. previous = 1;
  3189. } else {
  3190. if (logical_sector >= conf->expand_lo) {
  3191. spin_unlock_irq(&conf->device_lock);
  3192. schedule();
  3193. goto retry;
  3194. }
  3195. }
  3196. spin_unlock_irq(&conf->device_lock);
  3197. }
  3198. data_disks = disks - conf->max_degraded;
  3199. new_sector = raid5_compute_sector(conf, logical_sector,
  3200. previous,
  3201. &dd_idx, NULL);
  3202. pr_debug("raid5: make_request, sector %llu logical %llu\n",
  3203. (unsigned long long)new_sector,
  3204. (unsigned long long)logical_sector);
  3205. sh = get_active_stripe(conf, new_sector, previous,
  3206. (bi->bi_rw&RWA_MASK));
  3207. if (sh) {
  3208. if (unlikely(conf->expand_progress != MaxSector)) {
  3209. /* expansion might have moved on while waiting for a
  3210. * stripe, so we must do the range check again.
  3211. * Expansion could still move past after this
  3212. * test, but as we are holding a reference to
  3213. * 'sh', we know that if that happens,
  3214. * STRIPE_EXPANDING will get set and the expansion
  3215. * won't proceed until we finish with the stripe.
  3216. */
  3217. int must_retry = 0;
  3218. spin_lock_irq(&conf->device_lock);
  3219. if (logical_sector < conf->expand_progress &&
  3220. disks == conf->previous_raid_disks)
  3221. /* mismatch, need to try again */
  3222. must_retry = 1;
  3223. spin_unlock_irq(&conf->device_lock);
  3224. if (must_retry) {
  3225. release_stripe(sh);
  3226. goto retry;
  3227. }
  3228. }
  3229. /* FIXME what if we get a false positive because these
  3230. * are being updated.
  3231. */
  3232. if (logical_sector >= mddev->suspend_lo &&
  3233. logical_sector < mddev->suspend_hi) {
  3234. release_stripe(sh);
  3235. schedule();
  3236. goto retry;
  3237. }
  3238. if (test_bit(STRIPE_EXPANDING, &sh->state) ||
  3239. !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
  3240. /* Stripe is busy expanding or
  3241. * add failed due to overlap. Flush everything
  3242. * and wait a while
  3243. */
  3244. raid5_unplug_device(mddev->queue);
  3245. release_stripe(sh);
  3246. schedule();
  3247. goto retry;
  3248. }
  3249. finish_wait(&conf->wait_for_overlap, &w);
  3250. set_bit(STRIPE_HANDLE, &sh->state);
  3251. clear_bit(STRIPE_DELAYED, &sh->state);
  3252. release_stripe(sh);
  3253. } else {
3254. /* cannot get stripe for read-ahead, just give up */
  3255. clear_bit(BIO_UPTODATE, &bi->bi_flags);
  3256. finish_wait(&conf->wait_for_overlap, &w);
  3257. break;
  3258. }
  3259. }
  3260. spin_lock_irq(&conf->device_lock);
  3261. remaining = raid5_dec_bi_phys_segments(bi);
  3262. spin_unlock_irq(&conf->device_lock);
  3263. if (remaining == 0) {
  3264. if ( rw == WRITE )
  3265. md_write_end(mddev);
  3266. bio_endio(bi, 0);
  3267. }
  3268. return 0;
  3269. }
  3270. static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
  3271. {
  3272. /* reshaping is quite different to recovery/resync so it is
  3273. * handled quite separately ... here.
  3274. *
  3275. * On each call to sync_request, we gather one chunk worth of
  3276. * destination stripes and flag them as expanding.
  3277. * Then we find all the source stripes and request reads.
  3278. * As the reads complete, handle_stripe will copy the data
  3279. * into the destination stripe and release that stripe.
  3280. */
  3281. raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
  3282. struct stripe_head *sh;
  3283. sector_t first_sector, last_sector;
  3284. int raid_disks = conf->previous_raid_disks;
  3285. int data_disks = raid_disks - conf->max_degraded;
  3286. int new_data_disks = conf->raid_disks - conf->max_degraded;
  3287. int i;
  3288. int dd_idx;
  3289. sector_t writepos, safepos, gap;
  3290. if (sector_nr == 0 &&
  3291. conf->expand_progress != 0) {
  3292. /* restarting in the middle, skip the initial sectors */
  3293. sector_nr = conf->expand_progress;
  3294. sector_div(sector_nr, new_data_disks);
  3295. *skipped = 1;
  3296. return sector_nr;
  3297. }
  3298. /* we update the metadata when there is more than 3Meg
  3299. * in the block range (that is rather arbitrary, should
  3300. * probably be time based) or when the data about to be
  3301. * copied would over-write the source of the data at
  3302. * the front of the range.
  3303. * i.e. one new_stripe forward from expand_progress new_maps
  3304. * to after where expand_lo old_maps to
  3305. */
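/*
 * Worked example (illustrative): with 4 data disks in the new geometry,
 * the checkpoint below is forced once roughly 4 * 3000 * 2 sectors
 * (about 12 MB of array data) have been reshaped since the last
 * superblock update, or sooner if writepos has caught up with safepos.
 */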
  3306. writepos = conf->expand_progress +
  3307. conf->chunk_size/512*(new_data_disks);
  3308. sector_div(writepos, new_data_disks);
  3309. safepos = conf->expand_lo;
  3310. sector_div(safepos, data_disks);
  3311. gap = conf->expand_progress - conf->expand_lo;
  3312. if (writepos >= safepos ||
  3313. gap > (new_data_disks)*3000*2 /*3Meg*/) {
  3314. /* Cannot proceed until we've updated the superblock... */
  3315. wait_event(conf->wait_for_overlap,
  3316. atomic_read(&conf->reshape_stripes)==0);
  3317. mddev->reshape_position = conf->expand_progress;
  3318. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  3319. md_wakeup_thread(mddev->thread);
  3320. wait_event(mddev->sb_wait, mddev->flags == 0 ||
  3321. kthread_should_stop());
  3322. spin_lock_irq(&conf->device_lock);
  3323. conf->expand_lo = mddev->reshape_position;
  3324. spin_unlock_irq(&conf->device_lock);
  3325. wake_up(&conf->wait_for_overlap);
  3326. }
  3327. for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
  3328. int j;
  3329. int skipped = 0;
  3330. sh = get_active_stripe(conf, sector_nr+i, 0, 0);
  3331. set_bit(STRIPE_EXPANDING, &sh->state);
  3332. atomic_inc(&conf->reshape_stripes);
  3333. /* If any of this stripe is beyond the end of the old
  3334. * array, then we need to zero those blocks
  3335. */
  3336. for (j=sh->disks; j--;) {
  3337. sector_t s;
  3338. if (j == sh->pd_idx)
  3339. continue;
  3340. if (conf->level == 6 &&
  3341. j == sh->qd_idx)
  3342. continue;
  3343. s = compute_blocknr(sh, j);
  3344. if (s < mddev->array_sectors) {
  3345. skipped = 1;
  3346. continue;
  3347. }
  3348. memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
  3349. set_bit(R5_Expanded, &sh->dev[j].flags);
  3350. set_bit(R5_UPTODATE, &sh->dev[j].flags);
  3351. }
  3352. if (!skipped) {
  3353. set_bit(STRIPE_EXPAND_READY, &sh->state);
  3354. set_bit(STRIPE_HANDLE, &sh->state);
  3355. }
  3356. release_stripe(sh);
  3357. }
  3358. spin_lock_irq(&conf->device_lock);
  3359. conf->expand_progress = (sector_nr + i) * new_data_disks;
  3360. spin_unlock_irq(&conf->device_lock);
3361. /* Ok, those stripes are ready. We can start scheduling
  3362. * reads on the source stripes.
  3363. * The source stripes are determined by mapping the first and last
  3364. * block on the destination stripes.
  3365. */
  3366. first_sector =
  3367. raid5_compute_sector(conf, sector_nr*(new_data_disks),
  3368. 1, &dd_idx, NULL);
  3369. last_sector =
  3370. raid5_compute_sector(conf, ((sector_nr+conf->chunk_size/512)
  3371. *(new_data_disks) - 1),
  3372. 1, &dd_idx, NULL);
  3373. if (last_sector >= mddev->dev_sectors)
  3374. last_sector = mddev->dev_sectors - 1;
  3375. while (first_sector <= last_sector) {
  3376. sh = get_active_stripe(conf, first_sector, 1, 0);
  3377. set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
  3378. set_bit(STRIPE_HANDLE, &sh->state);
  3379. release_stripe(sh);
  3380. first_sector += STRIPE_SECTORS;
  3381. }
  3382. /* If this takes us to the resync_max point where we have to pause,
  3383. * then we need to write out the superblock.
  3384. */
  3385. sector_nr += conf->chunk_size>>9;
  3386. if (sector_nr >= mddev->resync_max) {
  3387. /* Cannot proceed until we've updated the superblock... */
  3388. wait_event(conf->wait_for_overlap,
  3389. atomic_read(&conf->reshape_stripes) == 0);
  3390. mddev->reshape_position = conf->expand_progress;
  3391. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  3392. md_wakeup_thread(mddev->thread);
  3393. wait_event(mddev->sb_wait,
  3394. !test_bit(MD_CHANGE_DEVS, &mddev->flags)
  3395. || kthread_should_stop());
  3396. spin_lock_irq(&conf->device_lock);
  3397. conf->expand_lo = mddev->reshape_position;
  3398. spin_unlock_irq(&conf->device_lock);
  3399. wake_up(&conf->wait_for_overlap);
  3400. }
  3401. return conf->chunk_size>>9;
  3402. }
  3403. /* FIXME go_faster isn't used */
  3404. static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
  3405. {
  3406. raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
  3407. struct stripe_head *sh;
  3408. sector_t max_sector = mddev->dev_sectors;
  3409. int sync_blocks;
  3410. int still_degraded = 0;
  3411. int i;
  3412. if (sector_nr >= max_sector) {
  3413. /* just being told to finish up .. nothing much to do */
  3414. unplug_slaves(mddev);
  3415. if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
  3416. end_reshape(conf);
  3417. return 0;
  3418. }
  3419. if (mddev->curr_resync < max_sector) /* aborted */
  3420. bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
  3421. &sync_blocks, 1);
  3422. else /* completed sync */
  3423. conf->fullsync = 0;
  3424. bitmap_close_sync(mddev->bitmap);
  3425. return 0;
  3426. }
  3427. if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
  3428. return reshape_request(mddev, sector_nr, skipped);
  3429. /* No need to check resync_max as we never do more than one
  3430. * stripe, and as resync_max will always be on a chunk boundary,
  3431. * if the check in md_do_sync didn't fire, there is no chance
  3432. * of overstepping resync_max here
  3433. */
3434. /* if there are too many failed drives and we are trying
  3435. * to resync, then assert that we are finished, because there is
  3436. * nothing we can do.
  3437. */
  3438. if (mddev->degraded >= conf->max_degraded &&
  3439. test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
  3440. sector_t rv = mddev->dev_sectors - sector_nr;
  3441. *skipped = 1;
  3442. return rv;
  3443. }
  3444. if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
  3445. !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
  3446. !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
  3447. /* we can skip this block, and probably more */
  3448. sync_blocks /= STRIPE_SECTORS;
  3449. *skipped = 1;
  3450. return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
  3451. }
  3452. bitmap_cond_end_sync(mddev->bitmap, sector_nr);
  3453. sh = get_active_stripe(conf, sector_nr, 0, 1);
  3454. if (sh == NULL) {
  3455. sh = get_active_stripe(conf, sector_nr, 0, 0);
  3456. /* make sure we don't swamp the stripe cache if someone else
  3457. * is trying to get access
  3458. */
  3459. schedule_timeout_uninterruptible(1);
  3460. }
  3461. /* Need to check if array will still be degraded after recovery/resync
  3462. * We don't need to check the 'failed' flag as when that gets set,
  3463. * recovery aborts.
  3464. */
  3465. for (i=0; i<mddev->raid_disks; i++)
  3466. if (conf->disks[i].rdev == NULL)
  3467. still_degraded = 1;
  3468. bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
  3469. spin_lock(&sh->lock);
  3470. set_bit(STRIPE_SYNCING, &sh->state);
  3471. clear_bit(STRIPE_INSYNC, &sh->state);
  3472. spin_unlock(&sh->lock);
  3473. /* wait for any blocked device to be handled */
  3474. while(unlikely(!handle_stripe(sh, NULL)))
  3475. ;
  3476. release_stripe(sh);
  3477. return STRIPE_SECTORS;
  3478. }
  3479. static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
  3480. {
  3481. /* We may not be able to submit a whole bio at once as there
  3482. * may not be enough stripe_heads available.
  3483. * We cannot pre-allocate enough stripe_heads as we may need
3484. * more than exist in the cache (if we allow ever larger chunks).
  3485. * So we do one stripe head at a time and record in
  3486. * ->bi_hw_segments how many have been done.
  3487. *
  3488. * We *know* that this entire raid_bio is in one chunk, so
  3489. * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
  3490. */
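/*
 * Example (assuming 4 KiB pages, so STRIPE_SECTORS == 8): a 256 KiB
 * chunk-aligned read spans 64 stripe_heads; if only the first 10 can be
 * obtained right now, bi_hw_segments is set to 10 and the bio is parked
 * in ->retry_read_aligned so that a later pass resumes at stripe 10.
 */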
  3491. struct stripe_head *sh;
  3492. int dd_idx;
  3493. sector_t sector, logical_sector, last_sector;
  3494. int scnt = 0;
  3495. int remaining;
  3496. int handled = 0;
  3497. logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
  3498. sector = raid5_compute_sector(conf, logical_sector,
  3499. 0, &dd_idx, NULL);
  3500. last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
  3501. for (; logical_sector < last_sector;
  3502. logical_sector += STRIPE_SECTORS,
  3503. sector += STRIPE_SECTORS,
  3504. scnt++) {
  3505. if (scnt < raid5_bi_hw_segments(raid_bio))
  3506. /* already done this stripe */
  3507. continue;
  3508. sh = get_active_stripe(conf, sector, 0, 1);
  3509. if (!sh) {
  3510. /* failed to get a stripe - must wait */
  3511. raid5_set_bi_hw_segments(raid_bio, scnt);
  3512. conf->retry_read_aligned = raid_bio;
  3513. return handled;
  3514. }
  3515. set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
  3516. if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
  3517. release_stripe(sh);
  3518. raid5_set_bi_hw_segments(raid_bio, scnt);
  3519. conf->retry_read_aligned = raid_bio;
  3520. return handled;
  3521. }
  3522. handle_stripe(sh, NULL);
  3523. release_stripe(sh);
  3524. handled++;
  3525. }
  3526. spin_lock_irq(&conf->device_lock);
  3527. remaining = raid5_dec_bi_phys_segments(raid_bio);
  3528. spin_unlock_irq(&conf->device_lock);
  3529. if (remaining == 0)
  3530. bio_endio(raid_bio, 0);
  3531. if (atomic_dec_and_test(&conf->active_aligned_reads))
  3532. wake_up(&conf->wait_for_stripe);
  3533. return handled;
  3534. }
  3535. /*
  3536. * This is our raid5 kernel thread.
  3537. *
  3538. * We scan the hash table for stripes which can be handled now.
  3539. * During the scan, completed stripes are saved for us by the interrupt
  3540. * handler, so that they will not have to wait for our next wakeup.
  3541. */
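/*
 * Note on the loop below: conf->device_lock is held while picking work,
 * but dropped around the actual processing (bitmap_unplug(),
 * retry_aligned_read(), handle_stripe()) and re-taken before the next
 * iteration.
 */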
  3542. static void raid5d(mddev_t *mddev)
  3543. {
  3544. struct stripe_head *sh;
  3545. raid5_conf_t *conf = mddev_to_conf(mddev);
  3546. int handled;
  3547. pr_debug("+++ raid5d active\n");
  3548. md_check_recovery(mddev);
  3549. handled = 0;
  3550. spin_lock_irq(&conf->device_lock);
  3551. while (1) {
  3552. struct bio *bio;
  3553. if (conf->seq_flush != conf->seq_write) {
  3554. int seq = conf->seq_flush;
  3555. spin_unlock_irq(&conf->device_lock);
  3556. bitmap_unplug(mddev->bitmap);
  3557. spin_lock_irq(&conf->device_lock);
  3558. conf->seq_write = seq;
  3559. activate_bit_delay(conf);
  3560. }
  3561. while ((bio = remove_bio_from_retry(conf))) {
  3562. int ok;
  3563. spin_unlock_irq(&conf->device_lock);
  3564. ok = retry_aligned_read(conf, bio);
  3565. spin_lock_irq(&conf->device_lock);
  3566. if (!ok)
  3567. break;
  3568. handled++;
  3569. }
  3570. sh = __get_priority_stripe(conf);
  3571. if (!sh)
  3572. break;
  3573. spin_unlock_irq(&conf->device_lock);
  3574. handled++;
  3575. handle_stripe(sh, conf->spare_page);
  3576. release_stripe(sh);
  3577. spin_lock_irq(&conf->device_lock);
  3578. }
  3579. pr_debug("%d stripes handled\n", handled);
  3580. spin_unlock_irq(&conf->device_lock);
  3581. async_tx_issue_pending_all();
  3582. unplug_slaves(mddev);
  3583. pr_debug("--- raid5d inactive\n");
  3584. }
  3585. static ssize_t
  3586. raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
  3587. {
  3588. raid5_conf_t *conf = mddev_to_conf(mddev);
  3589. if (conf)
  3590. return sprintf(page, "%d\n", conf->max_nr_stripes);
  3591. else
  3592. return 0;
  3593. }
  3594. static ssize_t
  3595. raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
  3596. {
  3597. raid5_conf_t *conf = mddev_to_conf(mddev);
  3598. unsigned long new;
  3599. int err;
  3600. if (len >= PAGE_SIZE)
  3601. return -EINVAL;
  3602. if (!conf)
  3603. return -ENODEV;
  3604. if (strict_strtoul(page, 10, &new))
  3605. return -EINVAL;
  3606. if (new <= 16 || new > 32768)
  3607. return -EINVAL;
  3608. while (new < conf->max_nr_stripes) {
  3609. if (drop_one_stripe(conf))
  3610. conf->max_nr_stripes--;
  3611. else
  3612. break;
  3613. }
  3614. err = md_allow_write(mddev);
  3615. if (err)
  3616. return err;
  3617. while (new > conf->max_nr_stripes) {
  3618. if (grow_one_stripe(conf))
  3619. conf->max_nr_stripes++;
  3620. else break;
  3621. }
  3622. return len;
  3623. }
  3624. static struct md_sysfs_entry
  3625. raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
  3626. raid5_show_stripe_cache_size,
  3627. raid5_store_stripe_cache_size);
  3628. static ssize_t
  3629. raid5_show_preread_threshold(mddev_t *mddev, char *page)
  3630. {
  3631. raid5_conf_t *conf = mddev_to_conf(mddev);
  3632. if (conf)
  3633. return sprintf(page, "%d\n", conf->bypass_threshold);
  3634. else
  3635. return 0;
  3636. }
  3637. static ssize_t
  3638. raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
  3639. {
  3640. raid5_conf_t *conf = mddev_to_conf(mddev);
  3641. unsigned long new;
  3642. if (len >= PAGE_SIZE)
  3643. return -EINVAL;
  3644. if (!conf)
  3645. return -ENODEV;
  3646. if (strict_strtoul(page, 10, &new))
  3647. return -EINVAL;
  3648. if (new > conf->max_nr_stripes)
  3649. return -EINVAL;
  3650. conf->bypass_threshold = new;
  3651. return len;
  3652. }
  3653. static struct md_sysfs_entry
  3654. raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
  3655. S_IRUGO | S_IWUSR,
  3656. raid5_show_preread_threshold,
  3657. raid5_store_preread_threshold);
  3658. static ssize_t
  3659. stripe_cache_active_show(mddev_t *mddev, char *page)
  3660. {
  3661. raid5_conf_t *conf = mddev_to_conf(mddev);
  3662. if (conf)
  3663. return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
  3664. else
  3665. return 0;
  3666. }
  3667. static struct md_sysfs_entry
  3668. raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
  3669. static struct attribute *raid5_attrs[] = {
  3670. &raid5_stripecache_size.attr,
  3671. &raid5_stripecache_active.attr,
  3672. &raid5_preread_bypass_threshold.attr,
  3673. NULL,
  3674. };
  3675. static struct attribute_group raid5_attrs_group = {
  3676. .name = NULL,
  3677. .attrs = raid5_attrs,
  3678. };
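/*
 * setup_conf() below validates the requested level (4/5/6), layout and
 * chunk size, then allocates the raid5_conf_t, the stripe hash table,
 * the spare page (RAID-6 only), the initial stripe cache of NR_STRIPES
 * heads and the raid5d thread, returning an ERR_PTR on failure.
 */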
  3679. static raid5_conf_t *setup_conf(mddev_t *mddev)
  3680. {
  3681. raid5_conf_t *conf;
  3682. int raid_disk, memory;
  3683. mdk_rdev_t *rdev;
  3684. struct disk_info *disk;
  3685. if (mddev->new_level != 5
  3686. && mddev->new_level != 4
  3687. && mddev->new_level != 6) {
  3688. printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
  3689. mdname(mddev), mddev->new_level);
  3690. return ERR_PTR(-EIO);
  3691. }
  3692. if ((mddev->new_level == 5
  3693. && !algorithm_valid_raid5(mddev->new_layout)) ||
  3694. (mddev->new_level == 6
  3695. && !algorithm_valid_raid6(mddev->new_layout))) {
  3696. printk(KERN_ERR "raid5: %s: layout %d not supported\n",
  3697. mdname(mddev), mddev->new_layout);
  3698. return ERR_PTR(-EIO);
  3699. }
  3700. if (mddev->new_level == 6 && mddev->raid_disks < 4) {
  3701. printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
  3702. mdname(mddev), mddev->raid_disks);
  3703. return ERR_PTR(-EINVAL);
  3704. }
  3705. if (!mddev->new_chunk || mddev->new_chunk % PAGE_SIZE) {
  3706. printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
  3707. mddev->new_chunk, mdname(mddev));
  3708. return ERR_PTR(-EINVAL);
  3709. }
  3710. conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
  3711. if (conf == NULL)
  3712. goto abort;
  3713. conf->raid_disks = mddev->raid_disks;
  3714. if (mddev->reshape_position == MaxSector)
  3715. conf->previous_raid_disks = mddev->raid_disks;
  3716. else
  3717. conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
  3718. conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
  3719. GFP_KERNEL);
  3720. if (!conf->disks)
  3721. goto abort;
  3722. conf->mddev = mddev;
  3723. if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
  3724. goto abort;
  3725. if (mddev->new_level == 6) {
  3726. conf->spare_page = alloc_page(GFP_KERNEL);
  3727. if (!conf->spare_page)
  3728. goto abort;
  3729. }
  3730. spin_lock_init(&conf->device_lock);
  3731. init_waitqueue_head(&conf->wait_for_stripe);
  3732. init_waitqueue_head(&conf->wait_for_overlap);
  3733. INIT_LIST_HEAD(&conf->handle_list);
  3734. INIT_LIST_HEAD(&conf->hold_list);
  3735. INIT_LIST_HEAD(&conf->delayed_list);
  3736. INIT_LIST_HEAD(&conf->bitmap_list);
  3737. INIT_LIST_HEAD(&conf->inactive_list);
  3738. atomic_set(&conf->active_stripes, 0);
  3739. atomic_set(&conf->preread_active_stripes, 0);
  3740. atomic_set(&conf->active_aligned_reads, 0);
  3741. conf->bypass_threshold = BYPASS_THRESHOLD;
  3742. pr_debug("raid5: run(%s) called.\n", mdname(mddev));
  3743. list_for_each_entry(rdev, &mddev->disks, same_set) {
  3744. raid_disk = rdev->raid_disk;
  3745. if (raid_disk >= conf->raid_disks
  3746. || raid_disk < 0)
  3747. continue;
  3748. disk = conf->disks + raid_disk;
  3749. disk->rdev = rdev;
  3750. if (test_bit(In_sync, &rdev->flags)) {
  3751. char b[BDEVNAME_SIZE];
  3752. printk(KERN_INFO "raid5: device %s operational as raid"
  3753. " disk %d\n", bdevname(rdev->bdev,b),
  3754. raid_disk);
  3755. } else
  3756. /* Cannot rely on bitmap to complete recovery */
  3757. conf->fullsync = 1;
  3758. }
  3759. conf->chunk_size = mddev->new_chunk;
  3760. conf->level = mddev->new_level;
  3761. if (conf->level == 6)
  3762. conf->max_degraded = 2;
  3763. else
  3764. conf->max_degraded = 1;
  3765. conf->algorithm = mddev->new_layout;
  3766. conf->max_nr_stripes = NR_STRIPES;
  3767. conf->expand_progress = mddev->reshape_position;
  3768. memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
  3769. conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
  3770. if (grow_stripes(conf, conf->max_nr_stripes)) {
  3771. printk(KERN_ERR
  3772. "raid5: couldn't allocate %dkB for buffers\n", memory);
  3773. goto abort;
  3774. } else
  3775. printk(KERN_INFO "raid5: allocated %dkB for %s\n",
  3776. memory, mdname(mddev));
  3777. conf->thread = md_register_thread(raid5d, mddev, "%s_raid5");
  3778. if (!conf->thread) {
  3779. printk(KERN_ERR
  3780. "raid5: couldn't allocate thread for %s\n",
  3781. mdname(mddev));
  3782. goto abort;
  3783. }
  3784. return conf;
  3785. abort:
  3786. if (conf) {
  3787. shrink_stripes(conf);
  3788. safe_put_page(conf->spare_page);
  3789. kfree(conf->disks);
  3790. kfree(conf->stripe_hashtbl);
  3791. kfree(conf);
  3792. return ERR_PTR(-EIO);
  3793. } else
  3794. return ERR_PTR(-ENOMEM);
  3795. }
  3796. static int run(mddev_t *mddev)
  3797. {
  3798. raid5_conf_t *conf;
  3799. int working_disks = 0;
  3800. mdk_rdev_t *rdev;
  3801. if (mddev->reshape_position != MaxSector) {
  3802. /* Check that we can continue the reshape.
  3803. * Currently only disks can change, it must
  3804. * increase, and we must be past the point where
  3805. * a stripe over-writes itself
  3806. */
  3807. sector_t here_new, here_old;
  3808. int old_disks;
  3809. int max_degraded = (mddev->level == 5 ? 1 : 2);
  3810. if (mddev->new_level != mddev->level ||
  3811. mddev->new_layout != mddev->layout ||
  3812. mddev->new_chunk != mddev->chunk_size) {
  3813. printk(KERN_ERR "raid5: %s: unsupported reshape "
  3814. "required - aborting.\n",
  3815. mdname(mddev));
  3816. return -EINVAL;
  3817. }
  3818. if (mddev->delta_disks <= 0) {
  3819. printk(KERN_ERR "raid5: %s: unsupported reshape "
  3820. "(reduce disks) required - aborting.\n",
  3821. mdname(mddev));
  3822. return -EINVAL;
  3823. }
  3824. old_disks = mddev->raid_disks - mddev->delta_disks;
  3825. /* reshape_position must be on a new-stripe boundary, and one
  3826. * further up in new geometry must map after here in old
  3827. * geometry.
  3828. */
  3829. here_new = mddev->reshape_position;
  3830. if (sector_div(here_new, (mddev->chunk_size>>9)*
  3831. (mddev->raid_disks - max_degraded))) {
  3832. printk(KERN_ERR "raid5: reshape_position not "
  3833. "on a stripe boundary\n");
  3834. return -EINVAL;
  3835. }
  3836. /* here_new is the stripe we will write to */
  3837. here_old = mddev->reshape_position;
  3838. sector_div(here_old, (mddev->chunk_size>>9)*
  3839. (old_disks-max_degraded));
  3840. /* here_old is the first stripe that we might need to read
  3841. * from */
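/*
 * Example (illustrative): growing from 4 to 5 disks at RAID-5
 * (max_degraded == 1) with a 64 KiB chunk, a reshape_position of 1536
 * sectors gives here_new == 1536 / (128 * 4) == 3 and here_old ==
 * 1536 / (128 * 3) == 4, so the stripe being written lies before the
 * first stripe that may still need to be read and the reshape can
 * safely continue.
 */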
  3842. if (here_new >= here_old) {
  3843. /* Reading from the same stripe as writing to - bad */
  3844. printk(KERN_ERR "raid5: reshape_position too early for "
  3845. "auto-recovery - aborting.\n");
  3846. return -EINVAL;
  3847. }
  3848. printk(KERN_INFO "raid5: reshape will continue\n");
  3849. /* OK, we should be able to continue; */
  3850. } else {
  3851. BUG_ON(mddev->level != mddev->new_level);
  3852. BUG_ON(mddev->layout != mddev->new_layout);
  3853. BUG_ON(mddev->chunk_size != mddev->new_chunk);
  3854. BUG_ON(mddev->delta_disks != 0);
  3855. }
  3856. conf = setup_conf(mddev);
  3857. if (conf == NULL)
  3858. return -EIO;
  3859. if (IS_ERR(conf))
  3860. return PTR_ERR(conf);
  3861. mddev->thread = conf->thread;
  3862. conf->thread = NULL;
  3863. mddev->private = conf;
  3864. /*
  3865. * 0 for a fully functional array, 1 or 2 for a degraded array.
  3866. */
  3867. list_for_each_entry(rdev, &mddev->disks, same_set)
  3868. if (rdev->raid_disk >= 0 &&
  3869. test_bit(In_sync, &rdev->flags))
  3870. working_disks++;
  3871. mddev->degraded = conf->raid_disks - working_disks;
  3872. if (mddev->degraded > conf->max_degraded) {
  3873. printk(KERN_ERR "raid5: not enough operational devices for %s"
  3874. " (%d/%d failed)\n",
  3875. mdname(mddev), mddev->degraded, conf->raid_disks);
  3876. goto abort;
  3877. }
  3878. /* device size must be a multiple of chunk size */
  3879. mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1);
  3880. mddev->resync_max_sectors = mddev->dev_sectors;
  3881. if (mddev->degraded > 0 &&
  3882. mddev->recovery_cp != MaxSector) {
  3883. if (mddev->ok_start_degraded)
  3884. printk(KERN_WARNING
  3885. "raid5: starting dirty degraded array: %s"
  3886. "- data corruption possible.\n",
  3887. mdname(mddev));
  3888. else {
  3889. printk(KERN_ERR
  3890. "raid5: cannot start dirty degraded array for %s\n",
  3891. mdname(mddev));
  3892. goto abort;
  3893. }
  3894. }
  3895. if (mddev->degraded == 0)
  3896. printk("raid5: raid level %d set %s active with %d out of %d"
  3897. " devices, algorithm %d\n", conf->level, mdname(mddev),
  3898. mddev->raid_disks-mddev->degraded, mddev->raid_disks,
  3899. conf->algorithm);
  3900. else
  3901. printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
  3902. " out of %d devices, algorithm %d\n", conf->level,
  3903. mdname(mddev), mddev->raid_disks - mddev->degraded,
  3904. mddev->raid_disks, conf->algorithm);
  3905. print_raid5_conf(conf);
  3906. if (conf->expand_progress != MaxSector) {
  3907. printk("...ok start reshape thread\n");
  3908. conf->expand_lo = conf->expand_progress;
  3909. atomic_set(&conf->reshape_stripes, 0);
  3910. clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  3911. clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
  3912. set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
  3913. set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  3914. mddev->sync_thread = md_register_thread(md_do_sync, mddev,
  3915. "%s_reshape");
  3916. }
3917. /* read-ahead size must cover two whole stripes, which is
3918. * 2 * (n - max_degraded) * chunksize, where 'n' is the number of raid devices
  3919. */
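/*
 * For example (assuming 4 KiB pages): 4 data disks with a 64 KiB chunk
 * give a 256 KiB stripe (64 pages), so ra_pages is raised to at least
 * 128 pages (512 KiB) to cover two whole stripes.
 */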
  3920. {
  3921. int data_disks = conf->previous_raid_disks - conf->max_degraded;
  3922. int stripe = data_disks *
  3923. (mddev->chunk_size / PAGE_SIZE);
  3924. if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
  3925. mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
  3926. }
  3927. /* Ok, everything is just fine now */
  3928. if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
  3929. printk(KERN_WARNING
  3930. "raid5: failed to create sysfs attributes for %s\n",
  3931. mdname(mddev));
  3932. mddev->queue->queue_lock = &conf->device_lock;
  3933. mddev->queue->unplug_fn = raid5_unplug_device;
  3934. mddev->queue->backing_dev_info.congested_data = mddev;
  3935. mddev->queue->backing_dev_info.congested_fn = raid5_congested;
  3936. mddev->array_sectors = mddev->dev_sectors *
  3937. (conf->previous_raid_disks - conf->max_degraded);
  3938. blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
  3939. return 0;
  3940. abort:
  3941. if (mddev->thread)
  3942. md_unregister_thread(mddev->thread);
  3943. mddev->thread = NULL;
  3944. if (conf) {
  3945. shrink_stripes(conf);
  3946. print_raid5_conf(conf);
  3947. safe_put_page(conf->spare_page);
  3948. kfree(conf->disks);
  3949. kfree(conf->stripe_hashtbl);
  3950. kfree(conf);
  3951. }
  3952. mddev->private = NULL;
  3953. printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
  3954. return -EIO;
  3955. }
  3956. static int stop(mddev_t *mddev)
  3957. {
  3958. raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
  3959. md_unregister_thread(mddev->thread);
  3960. mddev->thread = NULL;
  3961. shrink_stripes(conf);
  3962. kfree(conf->stripe_hashtbl);
  3963. mddev->queue->backing_dev_info.congested_fn = NULL;
  3964. blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
  3965. sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
  3966. kfree(conf->disks);
  3967. kfree(conf);
  3968. mddev->private = NULL;
  3969. return 0;
  3970. }
  3971. #ifdef DEBUG
  3972. static void print_sh(struct seq_file *seq, struct stripe_head *sh)
  3973. {
  3974. int i;
  3975. seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
  3976. (unsigned long long)sh->sector, sh->pd_idx, sh->state);
  3977. seq_printf(seq, "sh %llu, count %d.\n",
  3978. (unsigned long long)sh->sector, atomic_read(&sh->count));
  3979. seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
  3980. for (i = 0; i < sh->disks; i++) {
  3981. seq_printf(seq, "(cache%d: %p %ld) ",
  3982. i, sh->dev[i].page, sh->dev[i].flags);
  3983. }
  3984. seq_printf(seq, "\n");
  3985. }
  3986. static void printall(struct seq_file *seq, raid5_conf_t *conf)
  3987. {
  3988. struct stripe_head *sh;
  3989. struct hlist_node *hn;
  3990. int i;
  3991. spin_lock_irq(&conf->device_lock);
  3992. for (i = 0; i < NR_HASH; i++) {
  3993. hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
  3994. if (sh->raid_conf != conf)
  3995. continue;
  3996. print_sh(seq, sh);
  3997. }
  3998. }
  3999. spin_unlock_irq(&conf->device_lock);
  4000. }
  4001. #endif
  4002. static void status(struct seq_file *seq, mddev_t *mddev)
  4003. {
  4004. raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
  4005. int i;
  4006. seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
  4007. seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
  4008. for (i = 0; i < conf->raid_disks; i++)
  4009. seq_printf (seq, "%s",
  4010. conf->disks[i].rdev &&
  4011. test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
  4012. seq_printf (seq, "]");
  4013. #ifdef DEBUG
  4014. seq_printf (seq, "\n");
  4015. printall(seq, conf);
  4016. #endif
  4017. }
  4018. static void print_raid5_conf (raid5_conf_t *conf)
  4019. {
  4020. int i;
  4021. struct disk_info *tmp;
  4022. printk("RAID5 conf printout:\n");
  4023. if (!conf) {
  4024. printk("(conf==NULL)\n");
  4025. return;
  4026. }
  4027. printk(" --- rd:%d wd:%d\n", conf->raid_disks,
  4028. conf->raid_disks - conf->mddev->degraded);
  4029. for (i = 0; i < conf->raid_disks; i++) {
  4030. char b[BDEVNAME_SIZE];
  4031. tmp = conf->disks + i;
  4032. if (tmp->rdev)
  4033. printk(" disk %d, o:%d, dev:%s\n",
  4034. i, !test_bit(Faulty, &tmp->rdev->flags),
  4035. bdevname(tmp->rdev->bdev,b));
  4036. }
  4037. }
  4038. static int raid5_spare_active(mddev_t *mddev)
  4039. {
  4040. int i;
  4041. raid5_conf_t *conf = mddev->private;
  4042. struct disk_info *tmp;
  4043. for (i = 0; i < conf->raid_disks; i++) {
  4044. tmp = conf->disks + i;
  4045. if (tmp->rdev
  4046. && !test_bit(Faulty, &tmp->rdev->flags)
  4047. && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
  4048. unsigned long flags;
  4049. spin_lock_irqsave(&conf->device_lock, flags);
  4050. mddev->degraded--;
  4051. spin_unlock_irqrestore(&conf->device_lock, flags);
  4052. }
  4053. }
  4054. print_raid5_conf(conf);
  4055. return 0;
  4056. }
static int raid5_remove_disk(mddev_t *mddev, int number)
{
	raid5_conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	struct disk_info *p = conf->disks + number;

	print_raid5_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * isn't possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->degraded <= conf->max_degraded) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:
	print_raid5_conf(conf);
	return err;
}
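/*
 * Hot-add places the new rdev in a free slot, preferring the slot the
 * device previously occupied (rdev->saved_raid_disk) so that a
 * bitmap-based partial resync remains possible; landing anywhere else
 * forces conf->fullsync.  If a specific raid_disk was requested, only
 * that slot is considered, and nothing is added once the array has
 * already lost more devices than max_degraded allows.
 */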
static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	raid5_conf_t *conf = mddev->private;
	int err = -EEXIST;
	int disk;
	struct disk_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;

	if (mddev->degraded > conf->max_degraded)
		/* no point adding a device */
		return -EINVAL;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
		disk = rdev->saved_raid_disk;
	else
		disk = first;
	for ( ; disk <= last; disk++)
		if ((p = conf->disks + disk)->rdev == NULL) {
			clear_bit(In_sync, &rdev->flags);
			rdev->raid_disk = disk;
			err = 0;
			if (rdev->saved_raid_disk != disk)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
	print_raid5_conf(conf);
	return err;
}
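/*
 * Example of the size arithmetic below (illustrative figures): with a
 * 64 KiB chunk (128 sectors), a per-device size of 1000 sectors is
 * rounded down to 896, and a 4-disk RAID5 (max_degraded == 1) then
 * exposes 896 * 3 = 2688 sectors of array capacity.
 */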
static int raid5_resize(mddev_t *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	raid5_conf_t *conf = mddev_to_conf(mddev);

	sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
	mddev->array_sectors = sectors * (mddev->raid_disks
					  - conf->max_degraded);
	set_capacity(mddev->gendisk, mddev->array_sectors);
	mddev->changed = 1;
	if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}
#ifdef CONFIG_MD_RAID5_RESHAPE
static int raid5_check_reshape(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int err;

	if (mddev->delta_disks < 0 ||
	    mddev->new_level != mddev->level)
		return -EINVAL; /* Cannot shrink array or change level yet */
	if (mddev->delta_disks == 0)
		return 0; /* nothing to do */
	if (mddev->bitmap)
		/* Cannot grow a bitmap yet */
		return -EBUSY;

	/* Can only proceed if there are plenty of stripe_heads.
	 * We need a minimum of one full stripe, and for sensible progress
	 * it is best to have about 4 times that.
	 * If we require 4 times, then the default 256 4K stripe_heads will
	 * allow for chunk sizes up to 256K, which is probably OK.
	 * If the chunk size is greater, user-space should request more
	 * stripe_heads first.
	 */
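	/*
	 * For example (illustrative figures): with STRIPE_SIZE at 4 KiB, a
	 * 128 KiB chunk needs (131072 / 4096) * 4 = 128 stripe_heads, well
	 * within the default 256, whereas a 512 KiB chunk would need 512
	 * and the stripe cache would have to be enlarged through the
	 * stripe_cache_size sysfs attribute before the reshape can start.
	 */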
	if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
	    (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
		printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
		       (mddev->chunk_size / STRIPE_SIZE)*4);
		return -ENOSPC;
	}

	err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
	if (err)
		return err;

	if (mddev->degraded > conf->max_degraded)
		return -EINVAL;
	/* looks like we might be able to manage this */
	return 0;
}
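/*
 * Starting the reshape commits to the larger geometry: raid_disks is
 * bumped under device_lock, idle spares are pulled in as new members,
 * and a "%s_reshape" sync thread is started.  The spare check below asks
 * whether the grown array would at least be runnable in degraded mode;
 * for example, growing a clean 4-disk RAID5 by two devices with a single
 * spare available passes (1 - 0 < 2 - 1 is false), while with no spares
 * at all it is rejected with -EINVAL.
 */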
static int raid5_start_reshape(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t *rdev;
	int spares = 0;
	int added_devices = 0;
	unsigned long flags;

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk < 0 &&
		    !test_bit(Faulty, &rdev->flags))
			spares++;

	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
		/* Not enough devices even to make a degraded array
		 * of that size
		 */
		return -EINVAL;

	atomic_set(&conf->reshape_stripes, 0);
	spin_lock_irq(&conf->device_lock);
	conf->previous_raid_disks = conf->raid_disks;
	conf->raid_disks += mddev->delta_disks;
	conf->expand_progress = 0;
	conf->expand_lo = 0;
	spin_unlock_irq(&conf->device_lock);

	/* Add some new drives, as many as will fit.
	 * We know there are enough to make the newly sized array work.
	 */
	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk < 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			if (raid5_add_disk(mddev, rdev) == 0) {
				char nm[20];
				set_bit(In_sync, &rdev->flags);
				added_devices++;
				rdev->recovery_offset = 0;
				sprintf(nm, "rd%d", rdev->raid_disk);
				if (sysfs_create_link(&mddev->kobj,
						      &rdev->kobj, nm))
					printk(KERN_WARNING
					       "raid5: failed to create "
					       "link %s for %s\n",
					       nm, mdname(mddev));
			} else
				break;
		}

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices;
	spin_unlock_irqrestore(&conf->device_lock, flags);
	mddev->raid_disks = conf->raid_disks;
	mddev->reshape_position = 0;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
						"%s_reshape");
	if (!mddev->sync_thread) {
		mddev->recovery = 0;
		spin_lock_irq(&conf->device_lock);
		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
		conf->expand_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		return -EAGAIN;
	}
	md_wakeup_thread(mddev->sync_thread);
	md_new_event(mddev);
	return 0;
}
#endif
static void end_reshape(raid5_conf_t *conf)
{
	struct block_device *bdev;

	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		conf->mddev->array_sectors = conf->mddev->dev_sectors *
			(conf->raid_disks - conf->max_degraded);
		set_capacity(conf->mddev->gendisk, conf->mddev->array_sectors);
		conf->mddev->changed = 1;

		bdev = bdget_disk(conf->mddev->gendisk, 0);
		if (bdev) {
			mutex_lock(&bdev->bd_inode->i_mutex);
			i_size_write(bdev->bd_inode,
				     (loff_t)conf->mddev->array_sectors << 9);
			mutex_unlock(&bdev->bd_inode->i_mutex);
			bdput(bdev);
		}
		spin_lock_irq(&conf->device_lock);
		conf->expand_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		conf->mddev->reshape_position = MaxSector;

		/* read-ahead size must cover two whole stripes, which is
		 * 2 * (number of data disks) * chunksize
		 */
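		/*
		 * For example (illustrative figures): with 6 devices,
		 * max_degraded == 1 and 64 KiB chunks, data_disks is 5 and
		 * stripe is 5 * (65536 / 4096) = 80 pages, so read-ahead is
		 * raised to at least 160 pages (640 KiB) if it was smaller.
		 */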
		{
			int data_disks = conf->previous_raid_disks - conf->max_degraded;
			int stripe = data_disks *
				(conf->mddev->chunk_size / PAGE_SIZE);
			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
		}
	}
}
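/*
 * Quiesce states: 1 stops new writes and waits (under device_lock) until
 * no stripes and no aligned reads are active; 0 lifts the block and wakes
 * the waiters; 2 only wakes wait_for_overlap so that a suspended range
 * can make progress again.
 */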
static void raid5_quiesce(mddev_t *mddev, int state)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);

	switch(state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 1;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0 &&
				    atomic_read(&conf->active_aligned_reads) == 0,
				    conf->device_lock, /* nothing */);
		spin_unlock_irq(&conf->device_lock);
		break;

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		wake_up(&conf->wait_for_overlap);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}
static struct mdk_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
#ifdef CONFIG_MD_RAID5_RESHAPE
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
#endif
	.quiesce	= raid5_quiesce,
};

static struct mdk_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
#ifdef CONFIG_MD_RAID5_RESHAPE
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
#endif
	.quiesce	= raid5_quiesce,
};

static struct mdk_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
#ifdef CONFIG_MD_RAID5_RESHAPE
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
#endif
	.quiesce	= raid5_quiesce,
};
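/*
 * Module init: raid6_select_algo() (from the shared RAID-6 support code)
 * benchmarks the available P/Q syndrome routines and selects the fastest
 * before the raid6, raid5 and raid4 personalities are registered with md.
 */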
static int __init raid5_init(void)
{
	int e;

	e = raid6_select_algo();
	if (e)
		return e;
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");