/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 * we plug the array and queue the stripe for later.
 * When an unplug happens, we increment seq_flush, thus closing the current
 * batch.
 * When we notice that seq_flush > seq_write, we write out all pending updates
 * to the bitmap, and advance seq_write to where seq_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.  (The batch test itself is sketched just after the
 * includes below.)
 */
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "md.h"
#include "raid5.h"
#include "raid0.h"
#include "bitmap.h"
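
/*
 * Illustrative sketch, not part of the original driver: the batch test
 * implied by the BITMAP UNPLUGGING comment above, matching the check
 * do_release_stripe() makes before parking a stripe on bitmap_list.
 * The signed subtraction keeps the comparison correct across sequence
 * number wraparound.
 */
static inline int stripe_bitmap_batch_pending(struct r5conf *conf,
					      struct stripe_head *sh)
{
	/* true while the stripe's batch has not yet reached the bitmap */
	return sh->bm_seq - conf->seq_write > 0;
}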
/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define	IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
	int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
	return &conf->stripe_hashtbl[hash];
}

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device.
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
	int sectors = bio->bi_size >> 9;
	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
		return bio->bi_next;
	else
		return NULL;
}
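
/*
 * Illustrative sketch, not part of the original driver: walking the
 * per-device bio list with r5_next_bio().  This is the loop shape that
 * ops_run_biofill() and ops_complete_biofill() use below: visit each
 * bio that starts inside this stripe, and let r5_next_bio() stop the
 * walk at a bio that extends past the stripe boundary.
 */
static inline int r5_count_stripe_bios(struct bio *head, sector_t dev_sector)
{
	struct bio *bi = head;
	int n = 0;

	while (bi && bi->bi_sector < dev_sector + STRIPE_SECTORS) {
		n++;
		bi = r5_next_bio(bi, dev_sector);
	}
	return n;
}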
/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_processed_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return (atomic_read(segments) >> 16) & 0xffff;
}

static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return atomic_sub_return(1, segments) & 0xffff;
}

static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_inc(segments);
}

static inline void raid5_set_bi_processed_stripes(struct bio *bio,
	unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	int old, new;

	do {
		old = atomic_read(segments);
		new = (old & 0xffff) | (cnt << 16);
	} while (atomic_cmpxchg(segments, old, new) != old);
}

static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_set(segments, cnt);
}
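
/*
 * Worked example, not part of the original driver: with 3 active
 * stripes in the low half and 5 processed stripes in the high half,
 * bi_phys_segments holds (5 << 16) | 3 == 0x00050003.
 * raid5_bi_processed_stripes() then reads back 5, and one
 * raid5_dec_bi_active_stripes() call returns 2.
 */
static inline unsigned int raid5_bi_pack_example(void)
{
	unsigned int active = 3, processed = 5;

	return (processed << 16) | active;	/* 0x00050003 */
}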
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from the first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}

static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}
/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}
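
/*
 * Illustrative sketch, not part of the original driver: walk one
 * stripe from raid6_d0() and record the slot of every disk index.
 * For a 6-disk md-layout stripe with pd_idx == 4 and qd_idx == 5,
 * raid6_d0() is 0 and disks 0,1,2,3 land in data slots 0..3, the
 * parity disk in slot 4 (== syndrome_disks) and the Q disk in slot 5.
 */
static inline void raid6_slot_walk_example(struct stripe_head *sh, int *slots)
{
	int syndrome_disks = sh->ddf_layout ? sh->disks : sh->disks - 2;
	int d0 = raid6_d0(sh);
	int count = 0;
	int i = d0;

	do {
		slots[i] = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
		i = raid6_next_disk(i, sh->disks);
	} while (i != d0);
}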
static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;

	while (bi) {
		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bio_endio(bi, 0);
		bi = return_bi;
	}
}

static void print_raid5_conf(struct r5conf *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	BUG_ON(!list_empty(&sh->lru));
	BUG_ON(atomic_read(&conf->active_stripes) == 0);
	if (test_bit(STRIPE_HANDLE, &sh->state)) {
		if (test_bit(STRIPE_DELAYED, &sh->state) &&
		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			list_add_tail(&sh->lru, &conf->delayed_list);
		else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
			 sh->bm_seq - conf->seq_write > 0)
			list_add_tail(&sh->lru, &conf->bitmap_list);
		else {
			clear_bit(STRIPE_DELAYED, &sh->state);
			clear_bit(STRIPE_BIT_DELAY, &sh->state);
			list_add_tail(&sh->lru, &conf->handle_list);
		}
		md_wakeup_thread(conf->mddev->thread);
	} else {
		BUG_ON(stripe_operations_active(sh));
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			if (atomic_dec_return(&conf->preread_active_stripes)
			    < IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		atomic_dec(&conf->active_stripes);
		if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
			list_add_tail(&sh->lru, &conf->inactive_list);
			wake_up(&conf->wait_for_stripe);
			if (conf->retry_read_aligned)
				md_wakeup_thread(conf->mddev->thread);
		}
	}
}

static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count))
		do_release_stripe(conf, sh);
}

static void release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	unsigned long flags;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
		do_release_stripe(conf, sh);
		spin_unlock(&conf->device_lock);
	}
	local_irq_restore(flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_add_head(&sh->hash, hp);
}

/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(struct r5conf *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}
static void shrink_buffers(struct stripe_head *sh)
{
	struct page *p;
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh)
{
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}

static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
			   struct stripe_head *sh);

static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			WARN_ON(1);
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	insert_hash(conf, sh);
}

static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}
/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be in_sync in the section most affected by failed devices.
 */
static int calc_degraded(struct r5conf *conf)
{
	int degraded, degraded2;
	int i;

	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->previous_raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If the reshape increases the number of devices,
			 * this is being recovered by the reshape, so
			 * this 'previous' section is not in_sync.
			 * If the number of devices is being reduced however,
			 * the device can only be part of the array if
			 * we are reverting a reshape, so this section will
			 * be in-sync.
			 */
			if (conf->raid_disks >= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (conf->raid_disks == conf->previous_raid_disks)
		return degraded;
	rcu_read_lock();
	degraded2 = 0;
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded2++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If reshape increases the number of devices, this
			 * section has already been recovered, else it
			 * almost certainly hasn't.
			 */
			if (conf->raid_disks <= conf->previous_raid_disks)
				degraded2++;
	}
	rcu_read_unlock();
	if (degraded2 > degraded)
		return degraded2;
	return degraded;
}

static int has_failed(struct r5conf *conf)
{
	int degraded;

	if (conf->mddev->reshape_position == MaxSector)
		return conf->mddev->degraded > conf->max_degraded;

	degraded = calc_degraded(conf);
	if (degraded > conf->max_degraded)
		return 1;
	return 0;
}
static struct stripe_head *
get_active_stripe(struct r5conf *conf, sector_t sector,
		  int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0 || noquiesce,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes * 3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    );
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, previous);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru)
				    && !test_bit(STRIPE_EXPANDING, &sh->state)
				    && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}
/* Determine if 'data_offset' or 'new_data_offset' should be used
 * in this stripe_head.
 */
static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
{
	sector_t progress = conf->reshape_progress;
	/* Need a memory barrier to make sure we see the value
	 * of conf->generation, or ->data_offset that was set before
	 * reshape_progress was updated.
	 */
	smp_rmb();
	if (progress == MaxSector)
		return 0;
	if (sh->generation == conf->generation - 1)
		return 0;
	/* We are in a reshape, and this is a new-generation stripe,
	 * so use new_data_offset.
	 */
	return 1;
}
static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);

static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	struct r5conf *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		int replace_only = 0;
		struct bio *bi, *rbi;
		struct md_rdev *rdev, *rrdev = NULL;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
				rw = WRITE_FUA;
			else
				rw = WRITE;
			if (test_bit(R5_Discard, &sh->dev[i].flags))
				rw |= REQ_DISCARD;
		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else if (test_and_clear_bit(R5_WantReplace,
					    &sh->dev[i].flags)) {
			rw = WRITE;
			replace_only = 1;
		} else
			continue;
		if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
			rw |= REQ_SYNC;

		bi = &sh->dev[i].req;
		rbi = &sh->dev[i].rreq; /* For writing to replacement */

		bi->bi_rw = rw;
		rbi->bi_rw = rw;
		if (rw & WRITE) {
			bi->bi_end_io = raid5_end_write_request;
			rbi->bi_end_io = raid5_end_write_request;
		} else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rrdev = rcu_dereference(conf->disks[i].replacement);
		smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev) {
			rdev = rrdev;
			rrdev = NULL;
		}
		if (rw & WRITE) {
			if (replace_only)
				rdev = NULL;
			if (rdev == rrdev)
				/* We raced and saw duplicates */
				rrdev = NULL;
		} else {
			if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
				rdev = rrdev;
			rrdev = NULL;
		}

		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		if (rrdev && test_bit(Faulty, &rrdev->flags))
			rrdev = NULL;
		if (rrdev)
			atomic_inc(&rrdev->nr_pending);
		rcu_read_unlock();
		/* We have already checked bad blocks for reads.  Now
		 * we need to check for writes.  We never accept write errors
		 * on the replacement, so we don't need to check rrdev.
		 */
		while ((rw & WRITE) && rdev &&
		       test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
					      &first_bad, &bad_sectors);
			if (!bad)
				break;

			if (bad < 0) {
				set_bit(BlockedBadBlocks, &rdev->flags);
				if (!conf->mddev->external &&
				    conf->mddev->flags) {
					/* It is very unlikely, but we might
					 * still need to write out the
					 * bad block log - better give it
					 * a chance*/
					md_check_recovery(conf->mddev);
				}
				/*
				 * Because md_wait_for_blocked_rdev
				 * will dec nr_pending, we must
				 * increment it first.
				 */
				atomic_inc(&rdev->nr_pending);
				md_wait_for_blocked_rdev(rdev, conf->mddev);
			} else {
				/* Acknowledged bad block - skip the write */
				rdev_dec_pending(rdev, conf->mddev);
				rdev = NULL;
			}
		}

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded
			    || s->replacing)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bi->bi_bdev = rdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			if (use_new_offset(conf, sh))
				bi->bi_sector = (sh->sector
						 + rdev->new_data_offset);
			else
				bi->bi_sector = (sh->sector
						 + rdev->data_offset);
			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
				bi->bi_rw |= REQ_FLUSH;

			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_idx = 0;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if (rrdev)
				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
			generic_make_request(bi);
		}
		if (rrdev) {
			if (s->syncing || s->expanding || s->expanded
			    || s->replacing)
				md_sync_acct(rrdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			rbi->bi_bdev = rrdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on "
				 "replacement disc %d\n",
				__func__, (unsigned long long)sh->sector,
				rbi->bi_rw, i);
			atomic_inc(&sh->count);
			if (use_new_offset(conf, sh))
				rbi->bi_sector = (sh->sector
						  + rrdev->new_data_offset);
			else
				rbi->bi_sector = (sh->sector
						  + rrdev->data_offset);
			rbi->bi_flags = 1 << BIO_UPTODATE;
			rbi->bi_idx = 0;
			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			rbi->bi_io_vec[0].bv_offset = 0;
			rbi->bi_size = STRIPE_SIZE;
			rbi->bi_next = NULL;
			generic_make_request(rbi);
		}
		if (!rdev && !rrdev) {
			if (rw & WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;
	struct async_submit_ctl submit;
	enum async_tx_flags flags = 0;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;

	if (frombio)
		flags |= ASYNC_TX_FENCE;
	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

	bio_for_each_segment(bvl, bio, i) {
		int len = bvl->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bvl->bv_offset;
			bio_page = bvl->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
						  b_offset, clen, &submit);
			else
				tx = async_memcpy(bio_page, page, b_offset,
						  page_offset, clen, &submit);
		}
		/* chain the operations */
		submit.depend_tx = tx;

		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}

	return tx;
}
static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_active_stripes(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&sh->stripe_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&sh->stripe_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
						     dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
	async_trigger_callback(&submit);
}
static void mark_target_uptodate(struct stripe_head *sh, int target)
{
	struct r5dev *tgt;

	if (target < 0)
		return;

	tgt = &sh->dev[target];
	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
}

static void ops_complete_compute(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* mark the computed target(s) as uptodate */
	mark_target_uptodate(sh, sh->ops.target);
	mark_target_uptodate(sh, sh->ops.target2);

	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
				 struct raid5_percpu *percpu)
{
	return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}
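
/*
 * Descriptive note, not from the original source: to_addr_conv() relies
 * on the per-cpu scribble region being laid out as an array of page
 * pointers followed by the addr_conv_t area the async_tx API needs,
 * each sized for disks + 2 entries (sources plus P and Q):
 *
 *	[ struct page * x (disks + 2) | addr_conv_t x (disks + 2) ]
 *	^ percpu->scribble             ^ to_addr_conv(sh, percpu)
 */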
static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}
/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = NULL;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		srcs[slot] = sh->dev[i].page;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	return syndrome_disks;
}
static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **blocks = percpu->scribble;
	int target;
	int qd_idx = sh->qd_idx;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct r5dev *tgt;
	struct page *dest;
	int i;
	int count;

	if (sh->ops.target < 0)
		target = sh->ops.target2;
	else if (sh->ops.target2 < 0)
		target = sh->ops.target;
	else
		/* we should only have one valid target */
		BUG();
	BUG_ON(target < 0);
	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);

	tgt = &sh->dev[target];
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	dest = tgt->page;

	atomic_inc(&sh->count);

	if (target == qd_idx) {
		count = set_syndrome_sources(blocks, sh);
		blocks[count] = NULL; /* regenerating p is not necessary */
		BUG_ON(blocks[count+1] != dest); /* q should already be set */
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
	} else {
		/* Compute any data- or p-drive using XOR */
		count = 0;
		for (i = disks; i-- ; ) {
			if (i == target || i == qd_idx)
				continue;
			blocks[count++] = sh->dev[i].page;
		}

		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
				  NULL, ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
	}

	return tx;
}
static struct dma_async_tx_descriptor *
ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int i, count, disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
	int d0_idx = raid6_d0(sh);
	int faila = -1, failb = -1;
	int target = sh->ops.target;
	int target2 = sh->ops.target2;
	struct r5dev *tgt = &sh->dev[target];
	struct r5dev *tgt2 = &sh->dev[target2];
	struct dma_async_tx_descriptor *tx;
	struct page **blocks = percpu->scribble;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
		 __func__, (unsigned long long)sh->sector, target, target2);
	BUG_ON(target < 0 || target2 < 0);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));

	/* we need to open-code set_syndrome_sources to handle the
	 * slot number conversion for 'faila' and 'failb'
	 */
	for (i = 0; i < disks ; i++)
		blocks[i] = NULL;
	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		blocks[slot] = sh->dev[i].page;

		if (i == target)
			faila = slot;
		if (i == target2)
			failb = slot;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);
	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
		 __func__, (unsigned long long)sh->sector, faila, failb);

	atomic_inc(&sh->count);

	if (failb == syndrome_disks+1) {
		/* Q disk is one of the missing disks */
		if (faila == syndrome_disks) {
			/* Missing P+Q, just recompute */
			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, syndrome_disks+2,
						  STRIPE_SIZE, &submit);
		} else {
			struct page *dest;
			int data_target;
			int qd_idx = sh->qd_idx;

			/* Missing D+Q: recompute D from P, then recompute Q */
			if (target == qd_idx)
				data_target = target2;
			else
				data_target = target;

			count = 0;
			for (i = disks; i-- ; ) {
				if (i == data_target || i == qd_idx)
					continue;
				blocks[count++] = sh->dev[i].page;
			}
			dest = sh->dev[data_target].page;
			init_async_submit(&submit,
					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
					  NULL, NULL, NULL,
					  to_addr_conv(sh, percpu));
			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
				       &submit);

			count = set_syndrome_sources(blocks, sh);
			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		}
	} else {
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		if (failb == syndrome_disks) {
			/* We're missing D+P. */
			return async_raid6_datap_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila,
						       blocks, &submit);
		} else {
			/* We're missing D+D. */
			return async_raid6_2data_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila, failb,
						       blocks, &submit);
		}
	}
}

static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}

static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
	       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  ops_complete_prexor, sh, to_addr_conv(sh, percpu));
	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}
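
/*
 * Editor's note (illustrative, not from the original source): the prexor
 * pass is the first half of a read-modify-write.  Because xor is its own
 * inverse, xor-ing the blocks about to be drained (R5_Wantdrain) into the
 * existing parity "subtracts" their old contents:
 *
 *	P' = P xor D_old_1 xor ... xor D_old_k
 *
 * The later reconstruct pass xors the new data back in, producing the
 * parity of the updated stripe without touching the other data disks.
 */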

static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock_irq(&sh->stripe_lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock_irq(&sh->stripe_lock);

			while (wbi && wbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				if (wbi->bi_rw & REQ_FUA)
					set_bit(R5_WantFUA, &dev->flags);
				if (wbi->bi_rw & REQ_SYNC)
					set_bit(R5_SyncIO, &dev->flags);
				if (wbi->bi_rw & REQ_DISCARD)
					set_bit(R5_Discard, &dev->flags);
				else
					tx = async_copy_data(1, wbi, dev->page,
						dev->sector, tx);
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}

static void ops_complete_reconstruct(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	int i;
	bool fua = false, sync = false, discard = false;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
		sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
		discard |= test_bit(R5_Discard, &sh->dev[i].flags);
	}

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->written || i == pd_idx || i == qd_idx) {
			if (!discard)
				set_bit(R5_UPTODATE, &dev->flags);
			if (fua)
				set_bit(R5_WantFUA, &dev->flags);
			if (sync)
				set_bit(R5_SyncIO, &dev->flags);
		}
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = 0; i < sh->disks; i++) {
		if (pd_idx == i)
			continue;
		if (!test_bit(R5_Discard, &sh->dev[i].flags))
			break;
	}
	if (i >= sh->disks) {
		atomic_inc(&sh->count);
		set_bit(R5_Discard, &sh->dev[pd_idx].flags);
		ops_complete_reconstruct(sh);
		return;
	}
	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
			  to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
}
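
/*
 * Editor's note (illustrative, not from the original source): the two
 * branches above are the classic rmw/rcw trade-off.  For a stripe with
 * data blocks D1..Dn and parity P:
 *
 *	rmw (prexor ran):  P_new = P_prexor xor D_new(written blocks)
 *	rcw (no prexor):   P_new = D1 xor D2 xor ... xor Dn
 *
 * ASYNC_TX_XOR_DROP_DST marks the destination as duplicated at the head
 * of the source list (rmw), while ASYNC_TX_XOR_ZERO_DST asks for the
 * destination to be zeroed before the xor so a fresh parity results (rcw).
 */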

static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	struct async_submit_ctl submit;
	struct page **blocks = percpu->scribble;
	int count, i;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	for (i = 0; i < sh->disks; i++) {
		if (sh->pd_idx == i || sh->qd_idx == i)
			continue;
		if (!test_bit(R5_Discard, &sh->dev[i].flags))
			break;
	}
	if (i >= sh->disks) {
		atomic_inc(&sh->count);
		set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
		set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
		ops_complete_reconstruct(sh);
		return;
	}

	count = set_syndrome_sources(blocks, sh);

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
			  sh, to_addr_conv(sh, percpu));
	async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
}

static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct page *xor_dest;
	struct page **xor_srcs = percpu->scribble;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int count;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	count = 0;
	xor_dest = sh->dev[pd_idx].page;
	xor_srcs[count++] = xor_dest;
	for (i = disks; i--; ) {
		if (i == pd_idx || i == qd_idx)
			continue;
		xor_srcs[count++] = sh->dev[i].page;
	}

	init_async_submit(&submit, 0, NULL, NULL, NULL,
			  to_addr_conv(sh, percpu));
	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, &submit);

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
	tx = async_trigger_callback(&submit);
}

static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
	struct page **srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count;

	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
		(unsigned long long)sh->sector, checkp);

	count = set_syndrome_sources(srcs, sh);
	if (!checkp)
		srcs[count] = NULL;

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
			  sh, to_addr_conv(sh, percpu));
	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}

static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;
	struct r5conf *conf = sh->raid_conf;
	int level = conf->level;
	struct raid5_percpu *percpu;
	unsigned long cpu;

	cpu = get_cpu();
	percpu = per_cpu_ptr(conf->percpu, cpu);
	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		if (level < 6)
			tx = ops_run_compute5(sh, percpu);
		else {
			if (sh->ops.target2 < 0 || sh->ops.target < 0)
				tx = ops_run_compute6_1(sh, percpu);
			else
				tx = ops_run_compute6_2(sh, percpu);
		}
		/* terminate the chain if reconstruct is not set to be run */
		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
			async_tx_ack(tx);
	}

	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, percpu, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
		if (level < 6)
			ops_run_reconstruct5(sh, percpu, tx);
		else
			ops_run_reconstruct6(sh, percpu, tx);
	}

	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
		if (sh->check_state == check_state_run)
			ops_run_check_p(sh, percpu);
		else if (sh->check_state == check_state_run_q)
			ops_run_check_pq(sh, percpu, 0);
		else if (sh->check_state == check_state_run_pq)
			ops_run_check_pq(sh, percpu, 1);
		else
			BUG();
	}

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
	put_cpu();
}

#ifdef CONFIG_MULTICORE_RAID456
static void async_run_ops(void *param, async_cookie_t cookie)
{
	struct stripe_head *sh = param;
	unsigned long ops_request = sh->ops.request;

	clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
	wake_up(&sh->ops.wait_for_ops);

	__raid_run_ops(sh, ops_request);
	release_stripe(sh);
}

static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	/* since handle_stripe can be called outside of raid5d context
	 * we need to ensure sh->ops.request is de-staged before another
	 * request arrives
	 */
	wait_event(sh->ops.wait_for_ops,
		   !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
	sh->ops.request = ops_request;

	atomic_inc(&sh->count);
	async_schedule(async_run_ops, sh);
}
#else
#define raid_run_ops __raid_run_ops
#endif
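
/*
 * Editor's note (illustrative, not from the original source): the ops
 * requested on one stripe are dispatched in the fixed order used by
 * __raid_run_ops() above:
 *
 *	BIOFILL -> COMPUTE_BLK -> PREXOR -> BIODRAIN -> RECONSTRUCT -> CHECK
 *
 * COMPUTE_BLK, PREXOR, BIODRAIN and RECONSTRUCT are chained through the
 * same dma_async_tx_descriptor, so e.g. a read-modify-write that sets
 * PREXOR + BIODRAIN + RECONSTRUCT is guaranteed that the parity xor only
 * runs after the prexor'd parity and the newly drained data are ready.
 */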

static int grow_one_stripe(struct r5conf *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;

	sh->raid_conf = conf;
#ifdef CONFIG_MULTICORE_RAID456
	init_waitqueue_head(&sh->ops.wait_for_ops);
#endif
	spin_lock_init(&sh->stripe_lock);

	if (grow_buffers(sh)) {
		shrink_buffers(sh);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(struct r5conf *conf, int num)
{
	struct kmem_cache *sc;
	int devs = max(conf->raid_disks, conf->previous_raid_disks);

	if (conf->mddev->gendisk)
		sprintf(conf->cache_name[0],
			"raid%d-%s", conf->level, mdname(conf->mddev));
	else
		sprintf(conf->cache_name[0],
			"raid%d-%p", conf->level, conf->mddev);
	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);

	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}
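
/*
 * Editor's note (illustrative, not from the original source): each
 * stripe_head carries its r5dev array in the same slab object, which is
 * why the object size is
 * sizeof(struct stripe_head) + (devs - 1) * sizeof(struct r5dev) -
 * the struct itself already declares one r5dev.  For a level-5 array on
 * md0 the cache would be named "raid5-md0", and a caller preallocates the
 * stripe cache with something like grow_stripes(conf, 256).
 */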

/**
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
	size_t len;

	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

	return len;
}
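
/*
 * Editor's worked example (illustrative, not from the original source):
 * for an 8-device array on a 64-bit build, assuming both struct page *
 * and addr_conv_t are 8 bytes,
 *
 *	scribble_len(8) = 8 * (8+2) + 8 * (8+2) = 160 bytes
 *
 * per CPU: ten page pointers plus ten address-conversion slots.
 */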

static int resize_stripes(struct r5conf *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	unsigned long cpu;
	int err;
	struct kmem_cache *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		nsh->raid_conf = conf;
#ifdef CONFIG_MULTICORE_RAID456
		init_waitqueue_head(&nsh->ops.wait_for_ops);
#endif
		spin_lock_init(&nsh->stripe_lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    );
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for (i = 0; i < conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for ( ; i < newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks and the scribble region
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i = 0; i < conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	get_online_cpus();
	conf->scribble_len = scribble_len(newsize);
	for_each_present_cpu(cpu) {
		struct raid5_percpu *percpu;
		void *scribble;

		percpu = per_cpu_ptr(conf->percpu, cpu);
		scribble = kmalloc(conf->scribble_len, GFP_NOIO);

		if (scribble) {
			kfree(percpu->scribble);
			percpu->scribble = scribble;
		} else {
			err = -ENOMEM;
			break;
		}
	}
	put_online_cpus();

	/* Step 4, return new stripes to service */
	while (!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);

		for (i = conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section passed, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}

static int drop_one_stripe(struct r5conf *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}

static void shrink_stripes(struct r5conf *conf)
{
	while (drop_one_stripe(conf))
		;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}

static void raid5_end_read_request(struct bio *bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	char b[BDEVNAME_SIZE];
	struct md_rdev *rdev = NULL;
	sector_t s;

	for (i = 0; i < disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}
	if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
		/* If replacement finished while this request was outstanding,
		 * 'replacement' might be NULL already.
		 * In that case it moved down to 'rdev'.
		 * rdev is not removed until all requests are finished.
		 */
		rdev = conf->disks[i].replacement;
	if (!rdev)
		rdev = conf->disks[i].rdev;

	if (use_new_offset(conf, sh))
		s = sh->sector + rdev->new_data_offset;
	else
		s = sh->sector + rdev->data_offset;
	if (uptodate) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			/* Note that this cannot happen on a
			 * replacement device.  We just fail those on
			 * any error
			 */
			printk_ratelimited(
				KERN_INFO
				"md/raid:%s: read error corrected"
				" (%lu sectors at %llu on %s)\n",
				mdname(conf->mddev), STRIPE_SECTORS,
				(unsigned long long)s,
				bdevname(rdev->bdev, b));
			atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		} else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
			clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);

		if (atomic_read(&rdev->read_errors))
			atomic_set(&rdev->read_errors, 0);
	} else {
		const char *bdn = bdevname(rdev->bdev, b);
		int retry = 0;
		int set_bad = 0;

		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&rdev->read_errors);
		if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
			printk_ratelimited(
				KERN_WARNING
				"md/raid:%s: read error on replacement device "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)s,
				bdn);
		else if (conf->mddev->degraded >= conf->max_degraded) {
			set_bad = 1;
			printk_ratelimited(
				KERN_WARNING
				"md/raid:%s: read error not correctable "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)s,
				bdn);
		} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
			/* Oh, no!!! */
			set_bad = 1;
			printk_ratelimited(
				KERN_WARNING
				"md/raid:%s: read error NOT corrected!! "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)s,
				bdn);
		} else if (atomic_read(&rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "md/raid:%s: Too many read errors, failing device %s.\n",
			       mdname(conf->mddev), bdn);
		else
			retry = 1;
		if (retry)
			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
				set_bit(R5_ReadError, &sh->dev[i].flags);
				clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
			} else
				set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			if (!(set_bad
			      && test_bit(In_sync, &rdev->flags)
			      && rdev_set_badblocks(
				      rdev, sh->sector, STRIPE_SECTORS, 0)))
				md_error(conf->mddev, rdev);
		}
	}
	rdev_dec_pending(rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void raid5_end_write_request(struct bio *bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks, i;
	struct md_rdev *uninitialized_var(rdev);
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	sector_t first_bad;
	int bad_sectors;
	int replacement = 0;

	for (i = 0; i < disks; i++) {
		if (bi == &sh->dev[i].req) {
			rdev = conf->disks[i].rdev;
			break;
		}
		if (bi == &sh->dev[i].rreq) {
			rdev = conf->disks[i].replacement;
			if (rdev)
				replacement = 1;
			else
				/* rdev was removed and 'replacement'
				 * replaced it.  rdev is not removed
				 * until all requests are finished.
				 */
				rdev = conf->disks[i].rdev;
			break;
		}
	}
	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (replacement) {
		if (!uptodate)
			md_error(conf->mddev, rdev);
		else if (is_badblock(rdev, sh->sector,
				     STRIPE_SECTORS,
				     &first_bad, &bad_sectors))
			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
	} else {
		if (!uptodate) {
			set_bit(WriteErrorSeen, &rdev->flags);
			set_bit(R5_WriteError, &sh->dev[i].flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);
		} else if (is_badblock(rdev, sh->sector,
				       STRIPE_SECTORS,
				       &first_bad, &bad_sectors))
			set_bit(R5_MadeGood, &sh->dev[i].flags);
	}
	rdev_dec_pending(rdev, conf->mddev);

	if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
		clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);

static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->req.bi_private = sh;
	dev->vec.bv_page = dev->page;

	bio_init(&dev->rreq);
	dev->rreq.bi_io_vec = &dev->rvec;
	dev->rreq.bi_vcnt++;
	dev->rreq.bi_max_vecs++;
	dev->rreq.bi_private = sh;
	dev->rvec.bv_page = dev->page;

	dev->flags = 0;
	dev->sector = compute_blocknr(sh, i, previous);
}

static void error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r5conf *conf = mddev->private;
	unsigned long flags;
	pr_debug("raid456: error called\n");

	spin_lock_irqsave(&conf->device_lock, flags);
	clear_bit(In_sync, &rdev->flags);
	mddev->degraded = calc_degraded(conf);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);

	set_bit(Blocked, &rdev->flags);
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT
	       "md/raid:%s: Disk failure on %s, disabling device.\n"
	       "md/raid:%s: Operation continuing on %d devices.\n",
	       mdname(mddev),
	       bdevname(rdev->bdev, b),
	       mdname(mddev),
	       conf->raid_disks - mddev->degraded);
}

/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh)
{
	sector_t stripe, stripe2;
	sector_t chunk_number;
	unsigned int chunk_offset;
	int pd_idx, qd_idx;
	int ddf_layout = 0;
	sector_t new_sector;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int raid_disks = previous ? conf->previous_raid_disks
				  : conf->raid_disks;
	int data_disks = raid_disks - conf->max_degraded;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number;
	*dd_idx = sector_div(stripe, data_disks);
	stripe2 = stripe;
	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	pd_idx = qd_idx = -1;
	switch(conf->level) {
	case 4:
		pd_idx = data_disks;
		break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = data_disks - sector_div(stripe2, raid_disks);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = data_disks - sector_div(stripe2, raid_disks);
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			(*dd_idx)++;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			break;
		default:
			BUG();
		}
		break;
	case 6:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			qd_idx = 1;
			(*dd_idx) += 2;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			qd_idx = data_disks + 1;
			break;

		case ALGORITHM_ROTATING_ZERO_RESTART:
			/* Exactly the same as RIGHT_ASYMMETRIC, but the
			 * order of blocks for computing Q is different.
			 */
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_RESTART:
			/* Same as left_asymmetric, but the first stripe is
			 * D D D P Q  rather than
			 * Q D D D P
			 */
			stripe2 += 1;
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Same as left_symmetric but Q is before P */
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			ddf_layout = 1;
			break;

		case ALGORITHM_LEFT_ASYMMETRIC_6:
			/* RAID5 left_asymmetric, with Q on last device */
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_LEFT_SYMMETRIC_6:
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_SYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_PARITY_0_6:
			pd_idx = 0;
			(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		default:
			BUG();
		}
		break;
	}

	if (sh) {
		sh->pd_idx = pd_idx;
		sh->qd_idx = qd_idx;
		sh->ddf_layout = ddf_layout;
	}
	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
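
/*
 * Editor's worked example (illustrative, not from the original source):
 * level 5, ALGORITHM_LEFT_SYMMETRIC, raid_disks = 4 (so data_disks = 3),
 * sectors_per_chunk = 8, r_sector = 100:
 *
 *	chunk_offset = 100 % 8 = 4,  chunk_number = 12
 *	*dd_idx = 12 % 3 = 0,        stripe = stripe2 = 4
 *	pd_idx = 3 - (4 % 4) = 3
 *	*dd_idx = (3 + 1 + 0) % 4 = 0
 *	new_sector = 4 * 8 + 4 = 36
 *
 * so array sector 100 lives on device 0 at device sector 36, with the
 * parity for that stripe on device 3.
 */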

static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int raid_disks = sh->disks;
	int data_disks = raid_disks - conf->max_degraded;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	sector_t stripe;
	int chunk_offset;
	sector_t chunk_number;
	int dummy1, dd_idx = i;
	sector_t r_sector;
	struct stripe_head sh2;

	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;

	if (i == sh->pd_idx)
		return 0;
	switch(conf->level) {
	case 4: break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0:
			i -= 1;
			break;
		case ALGORITHM_PARITY_N:
			break;
		default:
			BUG();
		}
		break;
	case 6:
		if (i == sh->qd_idx)
			return 0; /* It is the Q disk */
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
		case ALGORITHM_ROTATING_ZERO_RESTART:
		case ALGORITHM_ROTATING_N_RESTART:
			if (sh->pd_idx == raid_disks-1)
				i--;	/* Q D D D P */
			else if (i > sh->pd_idx)
				i -= 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--; /* Q D D D P */
			else {
				/* D D P Q D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 2);
			}
			break;
		case ALGORITHM_PARITY_0:
			i -= 2;
			break;
		case ALGORITHM_PARITY_N:
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Like left_symmetric, but P is before Q */
			if (sh->pd_idx == 0)
				i--;	/* P D D D Q */
			else {
				/* D D Q P D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 1);
			}
			break;
		case ALGORITHM_LEFT_ASYMMETRIC_6:
		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC_6:
		case ALGORITHM_RIGHT_SYMMETRIC_6:
			if (i < sh->pd_idx)
				i += data_disks + 1;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0_6:
			i -= 1;
			break;
		default:
			BUG();
		}
		break;
	}

	chunk_number = stripe * data_disks + i;
	r_sector = chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector(conf, r_sector,
				     previous, &dummy1, &sh2);
	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
		|| sh2.qd_idx != sh->qd_idx) {
		printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
		       mdname(conf->mddev));
		return 0;
	}
	return r_sector;
}
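
/*
 * Editor's note (illustrative, not from the original source): this is the
 * exact inverse of raid5_compute_sector(), and the tail of the function
 * verifies that by mapping the result forward again.  Continuing the
 * worked example above (device 0, sector 36, pd_idx = 3, left-symmetric):
 *
 *	chunk_offset = 36 % 8 = 4,  stripe = 4,  i stays 0 after the
 *	symmetric adjustment ((0 + 4) - (3 + 1) = 0)
 *	chunk_number = 4 * 3 + 0 = 12
 *	r_sector = 12 * 8 + 4 = 100
 *
 * recovering the array sector we started from.
 */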

static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
			int rcw, int expand)
{
	int i, pd_idx = sh->pd_idx, disks = sh->disks;
	struct r5conf *conf = sh->raid_conf;
	int level = conf->level;

	if (rcw) {
		/* if we are not expanding this is a proper write request, and
		 * there will be bios with new data to be drained into the
		 * stripe cache
		 */
		if (!expand) {
			sh->reconstruct_state = reconstruct_state_drain_run;
			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		} else
			sh->reconstruct_state = reconstruct_state_run;

		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];

			if (dev->towrite) {
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantdrain, &dev->flags);
				if (!expand)
					clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
		if (s->locked + conf->max_degraded == disks)
			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
				atomic_inc(&conf->pending_full_writes);
	} else {
		BUG_ON(level == 6);
		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));

		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i == pd_idx)
				continue;

			if (dev->towrite &&
			    (test_bit(R5_UPTODATE, &dev->flags) ||
			     test_bit(R5_Wantcompute, &dev->flags))) {
				set_bit(R5_Wantdrain, &dev->flags);
				set_bit(R5_LOCKED, &dev->flags);
				clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
	}

	/* keep the parity disk(s) locked while asynchronous operations
	 * are in flight
	 */
	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
	s->locked++;

	if (level == 6) {
		int qd_idx = sh->qd_idx;
		struct r5dev *dev = &sh->dev[qd_idx];

		set_bit(R5_LOCKED, &dev->flags);
		clear_bit(R5_UPTODATE, &dev->flags);
		s->locked++;
	}

	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
		__func__, (unsigned long long)sh->sector,
		s->locked, s->ops_request);
}

/*
 * Each stripe/dev can have one or more bion attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	struct r5conf *conf = sh->raid_conf;
	int firstwrite = 0;

	pr_debug("adding bi b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);

	/*
	 * If several bios share a stripe, the bio bi_phys_segments acts as a
	 * reference count to avoid races.  The reference count should already
	 * be increased before this function is called (for example, in
	 * make_request()), so another bio sharing this stripe will not free
	 * the stripe.  If a stripe is owned by a single bio, the stripe lock
	 * will protect it.
	 */
	spin_lock_irq(&sh->stripe_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;

	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	raid5_inc_bi_active_stripes(bi);

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi = sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}

	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)(*bip)->bi_sector,
		(unsigned long long)sh->sector, dd_idx);
	spin_unlock_irq(&sh->stripe_lock);

	if (conf->mddev->bitmap && firstwrite) {
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		sh->bm_seq = conf->seq_flush+1;
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&sh->stripe_lock);
	return 0;
}
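
/*
 * Editor's worked example (illustrative, not from the original source)
 * of the "page is covered" scan above, with STRIPE_SECTORS == 8 and a
 * dev starting at sector 16: two queued 4-sector writes at bi_sector 16
 * and 20 advance 'sector' to 20 and then 24, which reaches
 * 16 + STRIPE_SECTORS, so R5_OVERWRITE is set and the whole page can be
 * overwritten without first reading its old contents.
 */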

static void end_reshape(struct r5conf *conf);

static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
			   struct stripe_head *sh)
{
	int sectors_per_chunk =
		previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
	int dd_idx;
	int chunk_offset = sector_div(stripe, sectors_per_chunk);
	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;

	raid5_compute_sector(conf,
			     stripe * (disks - conf->max_degraded)
			     *sectors_per_chunk + chunk_offset,
			     previous,
			     &dd_idx, sh);
}

static void
handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
		     struct stripe_head_state *s, int disks,
		     struct bio **return_bi)
{
	int i;
	for (i = disks; i--; ) {
		struct bio *bi;
		int bitmap_end = 0;

		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			struct md_rdev *rdev;
			rcu_read_lock();
			rdev = rcu_dereference(conf->disks[i].rdev);
			if (rdev && test_bit(In_sync, &rdev->flags))
				atomic_inc(&rdev->nr_pending);
			else
				rdev = NULL;
			rcu_read_unlock();
			if (rdev) {
				if (!rdev_set_badblocks(
					    rdev,
					    sh->sector,
					    STRIPE_SECTORS, 0))
					md_error(conf->mddev, rdev);
				rdev_dec_pending(rdev, conf->mddev);
			}
		}
		spin_lock_irq(&sh->stripe_lock);
		/* fail all writes first */
		bi = sh->dev[i].towrite;
		sh->dev[i].towrite = NULL;
		spin_unlock_irq(&sh->stripe_lock);
		if (bi)
			bitmap_end = 1;

		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			wake_up(&conf->wait_for_overlap);

		while (bi && bi->bi_sector <
			sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_active_stripes(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = nextbi;
		}
		if (bitmap_end)
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
				STRIPE_SECTORS, 0, 0);
		bitmap_end = 0;
		/* and fail all 'written' */
		bi = sh->dev[i].written;
		sh->dev[i].written = NULL;
		if (bi) bitmap_end = 1;
		while (bi && bi->bi_sector <
		       sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_active_stripes(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = bi2;
		}

		/* fail any reads if this device is non-operational and
		 * the data has not reached the cache yet.
		 */
		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
			spin_lock_irq(&sh->stripe_lock);
			bi = sh->dev[i].toread;
			sh->dev[i].toread = NULL;
			spin_unlock_irq(&sh->stripe_lock);
			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);
			while (bi && bi->bi_sector <
			       sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *nextbi =
					r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (!raid5_dec_bi_active_stripes(bi)) {
					bi->bi_next = *return_bi;
					*return_bi = bi;
				}
				bi = nextbi;
			}
		}
		if (bitmap_end)
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS, 0, 0);
		/* If we were in the middle of a write the parity block might
		 * still be locked - so just clear all R5_LOCKED flags
		 */
		clear_bit(R5_LOCKED, &sh->dev[i].flags);
	}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}

static void
handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
		   struct stripe_head_state *s)
{
	int abort = 0;
	int i;

	clear_bit(STRIPE_SYNCING, &sh->state);
	s->syncing = 0;
	s->replacing = 0;
	/* There is nothing more to do for sync/check/repair.
	 * Don't even need to abort as that is handled elsewhere
	 * if needed, and not always wanted e.g. if there is a known
	 * bad block here.
	 * For recover/replace we need to record a bad block on all
	 * non-sync devices, or abort the recovery
	 */
	if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
		/* During recovery devices cannot be removed, so
		 * locking and refcounting of rdevs is not needed
		 */
		for (i = 0; i < conf->raid_disks; i++) {
			struct md_rdev *rdev = conf->disks[i].rdev;
			if (rdev
			    && !test_bit(Faulty, &rdev->flags)
			    && !test_bit(In_sync, &rdev->flags)
			    && !rdev_set_badblocks(rdev, sh->sector,
						   STRIPE_SECTORS, 0))
				abort = 1;
			rdev = conf->disks[i].replacement;
			if (rdev
			    && !test_bit(Faulty, &rdev->flags)
			    && !test_bit(In_sync, &rdev->flags)
			    && !rdev_set_badblocks(rdev, sh->sector,
						   STRIPE_SECTORS, 0))
				abort = 1;
		}
		if (abort)
			conf->recovery_disabled =
				conf->mddev->recovery_disabled;
	}
	md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
}

static int want_replace(struct stripe_head *sh, int disk_idx)
{
	struct md_rdev *rdev;
	int rv = 0;
	/* Doing recovery so rcu locking not required */
	rdev = sh->raid_conf->disks[disk_idx].replacement;
	if (rdev
	    && !test_bit(Faulty, &rdev->flags)
	    && !test_bit(In_sync, &rdev->flags)
	    && (rdev->recovery_offset <= sh->sector
		|| rdev->mddev->recovery_cp <= sh->sector))
		rv = 1;

	return rv;
}

/* fetch_block - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill to continue
 */
static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
		       int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
				  &sh->dev[s->failed_num[1]] };

	/* is the data in this block needed, and can we get it? */
	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     (s->replacing && want_replace(sh, disk_idx)) ||
	     (s->failed >= 1 && fdev[0]->toread) ||
	     (s->failed >= 2 && fdev[1]->toread) ||
	     (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
	      !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
	     (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
		/* we would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
		BUG_ON(test_bit(R5_Wantread, &dev->flags));
		if ((s->uptodate == disks - 1) &&
		    (s->failed && (disk_idx == s->failed_num[0] ||
				   disk_idx == s->failed_num[1]))) {
			/* the disk has failed and we've been asked to fetch
			 * its data, so compute it from the other blocks
			 */
			pr_debug("Computing stripe %llu block %d\n",
				 (unsigned long long)sh->sector, disk_idx);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1; /* no 2nd target */
			s->req_compute = 1;
			/* Careful: from this point on 'uptodate' is in the eye
			 * of raid_run_ops which services 'compute' operations
			 * before writes. R5_Wantcompute flags a block that will
			 * be R5_UPTODATE by the time it is needed for a
			 * subsequent operation.
			 */
			s->uptodate++;
			return 1;
		} else if (s->uptodate == disks-2 && s->failed >= 2) {
			/* Computing 2-failure is *very* expensive; only
			 * do it if failed >= 2
			 */
			int other;
			for (other = disks; other--; ) {
				if (other == disk_idx)
					continue;
				if (!test_bit(R5_UPTODATE,
				      &sh->dev[other].flags))
					break;
			}
			BUG_ON(other < 0);
			pr_debug("Computing stripe %llu blocks %d,%d\n",
				 (unsigned long long)sh->sector,
				 disk_idx, other);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
			set_bit(R5_Wantcompute, &sh->dev[other].flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = other;
			s->uptodate += 2;
			s->req_compute = 1;
			return 1;
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n",
				 disk_idx, s->syncing);
		}
	}

	return 0;
}
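
/*
 * Editor's note (illustrative, not from the original source): on a 6-disk
 * RAID6 stripe with two failed devices, once the four surviving blocks
 * are up to date (s->uptodate == disks - 2), fetch_block() schedules one
 * two-target compute (ops.target/ops.target2) instead of reads that can
 * never be serviced; ops_run_compute6_2() above then performs the D+D,
 * D+P, D+Q or P+Q recovery as appropriate.
 */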

/**
 * handle_stripe_fill - read or compute data to satisfy pending requests.
 */
static void handle_stripe_fill(struct stripe_head *sh,
			       struct stripe_head_state *s,
			       int disks)
{
	int i;

	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	 */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block(sh, s, i, disks))
				break;
	set_bit(STRIPE_HANDLE, &sh->state);
}
  2459. /* handle_stripe_clean_event
  2460. * any written block on an uptodate or failed drive can be returned.
  2461. * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
  2462. * never LOCKED, so we don't need to test 'failed' directly.
  2463. */
  2464. static void handle_stripe_clean_event(struct r5conf *conf,
  2465. struct stripe_head *sh, int disks, struct bio **return_bi)
  2466. {
  2467. int i;
  2468. struct r5dev *dev;
  2469. for (i = disks; i--; )
  2470. if (sh->dev[i].written) {
  2471. dev = &sh->dev[i];
  2472. if (!test_bit(R5_LOCKED, &dev->flags) &&
  2473. (test_bit(R5_UPTODATE, &dev->flags) ||
  2474. test_and_clear_bit(R5_Discard, &dev->flags))) {
  2475. /* We can return any write requests */
  2476. struct bio *wbi, *wbi2;
  2477. pr_debug("Return write for disc %d\n", i);
  2478. wbi = dev->written;
  2479. dev->written = NULL;
  2480. while (wbi && wbi->bi_sector <
  2481. dev->sector + STRIPE_SECTORS) {
  2482. wbi2 = r5_next_bio(wbi, dev->sector);
  2483. if (!raid5_dec_bi_active_stripes(wbi)) {
  2484. md_write_end(conf->mddev);
  2485. wbi->bi_next = *return_bi;
  2486. *return_bi = wbi;
  2487. }
  2488. wbi = wbi2;
  2489. }
  2490. bitmap_endwrite(conf->mddev->bitmap, sh->sector,
  2491. STRIPE_SECTORS,
  2492. !test_bit(STRIPE_DEGRADED, &sh->state),
  2493. 0);
  2494. }
  2495. }
  2496. if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
  2497. if (atomic_dec_and_test(&conf->pending_full_writes))
  2498. md_wakeup_thread(conf->mddev->thread);
  2499. }

static void handle_stripe_dirtying(struct r5conf *conf,
				   struct stripe_head *sh,
				   struct stripe_head_state *s,
				   int disks)
{
	int rmw = 0, rcw = 0, i;
	sector_t recovery_cp = conf->mddev->recovery_cp;

	/* RAID6 requires 'rcw' in current implementation.
	 * Otherwise, check whether resync is now happening or should start.
	 * If yes, then the array is dirty (after unclean shutdown or
	 * initial creation), so parity in some stripes might be inconsistent.
	 * In this case, we need to always do reconstruct-write, to ensure
	 * that in case of drive failure or read-error correction, we
	 * generate correct data from the parity.
	 */
	if (conf->max_degraded == 2 ||
	    (recovery_cp < MaxSector && sh->sector >= recovery_cp)) {
		/* Calculate the real rcw later - for now make it
		 * look like rcw is cheaper
		 */
		rcw = 1; rmw = 2;
		pr_debug("force RCW max_degraded=%u, recovery_cp=%llu sh->sector=%llu\n",
			 conf->max_degraded, (unsigned long long)recovery_cp,
			 (unsigned long long)sh->sector);
	} else for (i = disks; i--; ) {
		/* would I have to read this buffer for read_modify_write */
		struct r5dev *dev = &sh->dev[i];
		if ((dev->towrite || i == sh->pd_idx) &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rmw++;
			else
				rmw += 2*disks;  /* cannot read it */
		}
		/* Would I have to read this buffer for reconstruct_write */
		if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rcw++;
			else
				rcw += 2*disks;
		}
	}
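
	/*
	 * Worked example (illustrative): on a 5-device RAID5 (4 data + P)
	 * where exactly one cold data block is being overwritten, rmw counts
	 * the old data block plus parity (rmw == 2) while rcw counts the
	 * three data blocks not being written (rcw == 3), so read-modify-write
	 * wins.  A full-stripe write leaves rcw == 0, so no pre-reads are
	 * needed and a reconstruct-write is scheduled immediately below.
	 */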
	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
		 (unsigned long long)sh->sector, rmw, rcw);
	set_bit(STRIPE_HANDLE, &sh->state);
	if (rmw < rcw && rmw > 0)
		/* prefer read-modify-write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (test_bit(STRIPE_PREREAD_ACTIVE,
					     &sh->state)) {
					pr_debug("Read_old block %d for r-m-w\n",
						 i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	if (rcw <= rmw && rcw > 0) {
		/* want reconstruct write, but need to get some data */
		rcw = 0;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
			    i != sh->pd_idx && i != sh->qd_idx &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags))) {
				rcw++;
				if (!test_bit(R5_Insync, &dev->flags))
					continue; /* it's a failed drive */
				if (test_bit(STRIPE_PREREAD_ACTIVE,
					     &sh->state)) {
					pr_debug("Read_old block %d for Reconstruct\n",
						 i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	}
	/* now if nothing is locked, and if we have enough data,
	 * we can start a write request
	 */
	/* since handle_stripe can be called at any time we need to handle the
	 * case where a compute block operation has been submitted and then a
	 * subsequent call wants to start a write request.  raid_run_ops only
	 * handles the case where compute block and reconstruct are requested
	 * simultaneously.  If this is not the case then new writes need to be
	 * held off until the compute completes.
	 */
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
	     !test_bit(STRIPE_BIT_DELAY, &sh->state)))
		schedule_reconstruction(sh, s, rcw == 0, 0);
}
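
/*
 * Note: schedule_reconstruction() above is passed 'rcw == 0' as its third
 * argument, so the reconstruct-write path is chosen exactly when no more
 * blocks are needed for it; if instead rmw reached zero first, the
 * prexor-based read-modify-write path is taken.
 */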

static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
				  struct stripe_head_state *s, int disks)
{
	struct r5dev *dev = NULL;

	set_bit(STRIPE_HANDLE, &sh->state);

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are no failures */
		if (s->failed == 0) {
			BUG_ON(s->uptodate != disks);
			sh->check_state = check_state_run;
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
			s->uptodate--;
			break;
		}
		dev = &sh->dev[s->failed_num[0]];
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;
		if (!dev)
			dev = &sh->dev[sh->pd_idx];

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* either failed parity check, or recovery is happening */
		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
		BUG_ON(s->uptodate != disks);

		set_bit(R5_LOCKED, &dev->flags);
		s->locked++;
		set_bit(R5_Wantwrite, &dev->flags);

		clear_bit(STRIPE_DEGRADED, &sh->state);
		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* if a failure occurred during the check operation, leave
		 * STRIPE_INSYNC not set and let the stripe be handled again
		 */
		if (s->failed)
			break;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
			/* parity is correct (on disc,
			 * not in buffer any more)
			 */
			set_bit(STRIPE_INSYNC, &sh->state);
		else {
			atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				set_bit(R5_Wantcompute,
					&sh->dev[sh->pd_idx].flags);
				sh->ops.target = sh->pd_idx;
				sh->ops.target2 = -1;
				s->uptodate++;
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
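
/*
 * State walk-through for the raid5 check above: idle -> run queues an
 * xor-based zero-sum check, whose completion lands in
 * check_state_check_result.  A clean result sets STRIPE_INSYNC; a mismatch
 * during a repair pass moves to check_state_compute_run to regenerate
 * parity, and the regenerated block is written out via
 * check_state_compute_result.
 */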

static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
				  struct stripe_head_state *s,
				  int disks)
{
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct r5dev *dev;

	set_bit(STRIPE_HANDLE, &sh->state);

	BUG_ON(s->failed > 2);

	/* Want to check and possibly repair P and Q.
	 * However there could be one 'failed' device, in which
	 * case we can only check one of them, possibly using the
	 * other to generate missing data
	 */

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are < 2 failures */
		if (s->failed == s->q_failed) {
			/* The only possible failed device holds Q, so it
			 * makes sense to check P (If anything else were failed,
			 * we would have used P to recreate it).
			 */
			sh->check_state = check_state_run;
		}
		if (!s->q_failed && s->failed < 2) {
			/* Q is not failed, and we didn't use it to generate
			 * anything, so it makes sense to check it
			 */
			if (sh->check_state == check_state_run)
				sh->check_state = check_state_run_pq;
			else
				sh->check_state = check_state_run_q;
		}

		/* discard potentially stale zero_sum_result */
		sh->ops.zero_sum_result = 0;

		if (sh->check_state == check_state_run) {
			/* async_xor_zero_sum destroys the contents of P */
			clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
			s->uptodate--;
		}
		if (sh->check_state >= check_state_run &&
		    sh->check_state <= check_state_run_pq) {
			/* async_syndrome_zero_sum preserves P and Q, so
			 * no need to mark them !uptodate here
			 */
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			break;
		}

		/* we have 2-disk failure */
		BUG_ON(s->failed != 2);
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* now write out any block on a failed drive,
		 * or P or Q if they were recomputed
		 */
		BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
		if (s->failed == 2) {
			dev = &sh->dev[s->failed_num[1]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (s->failed >= 1) {
			dev = &sh->dev[s->failed_num[0]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
			dev = &sh->dev[pd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
			dev = &sh->dev[qd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		clear_bit(STRIPE_DEGRADED, &sh->state);

		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
	case check_state_run_q:
	case check_state_run_pq:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if (sh->ops.zero_sum_result == 0) {
			/* both parities are correct */
			if (!s->failed)
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				/* in contrast to the raid5 case we can validate
				 * parity, but still have a failure to write
				 * back
				 */
				sh->check_state = check_state_compute_result;
				/* Returning at this point means that we may go
				 * off and bring p and/or q uptodate again so
				 * we make sure to check zero_sum_result again
				 * to verify if p or q need writeback
				 */
			}
		} else {
			atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				int *target = &sh->ops.target;

				sh->ops.target = -1;
				sh->ops.target2 = -1;
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[pd_idx].flags);
					*target = pd_idx;
					target = &sh->ops.target2;
					s->uptodate++;
				}
				if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[qd_idx].flags);
					*target = qd_idx;
					s->uptodate++;
				}
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
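
/*
 * In the raid6 version above, sh->ops.zero_sum_result acts as a bitmask:
 * SUM_CHECK_P_RESULT and SUM_CHECK_Q_RESULT may be set independently, so a
 * repair can recompute P alone, Q alone, or both, filling ops.target and
 * then ops.target2 in turn.
 */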

static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
{
	int i;

	/* We have read all the blocks in this stripe and now we need to
	 * copy some of them into a target stripe for expand.
	 */
	struct dma_async_tx_descriptor *tx = NULL;
	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	for (i = 0; i < sh->disks; i++)
		if (i != sh->pd_idx && i != sh->qd_idx) {
			int dd_idx, j;
			struct stripe_head *sh2;
			struct async_submit_ctl submit;

			sector_t bn = compute_blocknr(sh, i, 1);
			sector_t s = raid5_compute_sector(conf, bn, 0,
							  &dd_idx, NULL);
			sh2 = get_active_stripe(conf, s, 0, 1, 1);
			if (sh2 == NULL)
				/* so far only the early blocks of this stripe
				 * have been requested.  When later blocks
				 * get requested, we will try again
				 */
				continue;
			if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
			    test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
				/* must have already done this block */
				release_stripe(sh2);
				continue;
			}

			/* place all the copies on one channel */
			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
			tx = async_memcpy(sh2->dev[dd_idx].page,
					  sh->dev[i].page, 0, 0, STRIPE_SIZE,
					  &submit);

			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
			for (j = 0; j < conf->raid_disks; j++)
				if (j != sh2->pd_idx &&
				    j != sh2->qd_idx &&
				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
					break;
			if (j == conf->raid_disks) {
				set_bit(STRIPE_EXPAND_READY, &sh2->state);
				set_bit(STRIPE_HANDLE, &sh2->state);
			}
			release_stripe(sh2);
		}
	/* done submitting copies, wait for them to complete */
	if (tx) {
		async_tx_ack(tx);
		dma_wait_for_async_tx(tx);
	}
}
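
/*
 * All copies above are chained onto the single 'tx' descriptor, so one
 * dma_wait_for_async_tx() at the end is sufficient to guarantee that every
 * async_memcpy() into the target stripes has completed before we return.
 */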

/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe by setting STRIPE_ACTIVE and then examine the
 * state of various bits to see what needs to be done.
 * Possible results:
 *    return some read requests which now have data
 *    return some write requests which are safely on storage
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 */

static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
{
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks;
	struct r5dev *dev;
	int i;
	int do_recovery = 0;

	memset(s, 0, sizeof(*s));

	s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
	s->failed_num[0] = -1;
	s->failed_num[1] = -1;

	/* Now to look around and see what can be done */
	rcu_read_lock();
	for (i = disks; i--; ) {
		struct md_rdev *rdev;
		sector_t first_bad;
		int bad_sectors;
		int is_bad = 0;

		dev = &sh->dev[i];

		pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
			 i, dev->flags,
			 dev->toread, dev->towrite, dev->written);
		/* maybe we can reply to a read
		 *
		 * new wantfill requests are only permitted while
		 * ops_complete_biofill is guaranteed to be inactive
		 */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
			set_bit(R5_Wantfill, &dev->flags);

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags))
			s->locked++;
		if (test_bit(R5_UPTODATE, &dev->flags))
			s->uptodate++;
		if (test_bit(R5_Wantcompute, &dev->flags)) {
			s->compute++;
			BUG_ON(s->compute > 2);
		}

		if (test_bit(R5_Wantfill, &dev->flags))
			s->to_fill++;
		else if (dev->toread)
			s->to_read++;
		if (dev->towrite) {
			s->to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				s->non_overwrite++;
		}
		if (dev->written)
			s->written++;
		/* Prefer to use the replacement for reads, but only
		 * if it is recovered enough and has no bad blocks.
		 */
		rdev = rcu_dereference(conf->disks[i].replacement);
		if (rdev && !test_bit(Faulty, &rdev->flags) &&
		    rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
		    !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
				 &first_bad, &bad_sectors))
			set_bit(R5_ReadRepl, &dev->flags);
		else {
			if (rdev)
				set_bit(R5_NeedReplace, &dev->flags);
			rdev = rcu_dereference(conf->disks[i].rdev);
			clear_bit(R5_ReadRepl, &dev->flags);
		}
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev) {
			is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
					     &first_bad, &bad_sectors);
			if (s->blocked_rdev == NULL
			    && (test_bit(Blocked, &rdev->flags)
				|| is_bad < 0)) {
				if (is_bad < 0)
					set_bit(BlockedBadBlocks,
						&rdev->flags);
				s->blocked_rdev = rdev;
				atomic_inc(&rdev->nr_pending);
			}
		}
		clear_bit(R5_Insync, &dev->flags);
		if (!rdev)
			/* Not in-sync */;
		else if (is_bad) {
			/* also not in-sync */
			if (!test_bit(WriteErrorSeen, &rdev->flags) &&
			    test_bit(R5_UPTODATE, &dev->flags)) {
				/* treat as in-sync, but with a read error
				 * which we can now try to correct
				 */
				set_bit(R5_Insync, &dev->flags);
				set_bit(R5_ReadError, &dev->flags);
			}
		} else if (test_bit(In_sync, &rdev->flags))
			set_bit(R5_Insync, &dev->flags);
		else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
			/* in sync if before recovery_offset */
			set_bit(R5_Insync, &dev->flags);
		else if (test_bit(R5_UPTODATE, &dev->flags) &&
			 test_bit(R5_Expanded, &dev->flags))
			/* If we've reshaped into here, we assume it is Insync.
			 * We will shortly update recovery_offset to make
			 * it official.
			 */
			set_bit(R5_Insync, &dev->flags);

		if (rdev && test_bit(R5_WriteError, &dev->flags)) {
			/* This flag does not apply to '.replacement'
			 * only to .rdev, so make sure to check that
			 */
			struct md_rdev *rdev2 = rcu_dereference(
				conf->disks[i].rdev);
			if (rdev2 == rdev)
				clear_bit(R5_Insync, &dev->flags);
			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
				s->handle_bad_blocks = 1;
				atomic_inc(&rdev2->nr_pending);
			} else
				clear_bit(R5_WriteError, &dev->flags);
		}
		if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
			/* This flag does not apply to '.replacement'
			 * only to .rdev, so make sure to check that
			 */
			struct md_rdev *rdev2 = rcu_dereference(
				conf->disks[i].rdev);
			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
				s->handle_bad_blocks = 1;
				atomic_inc(&rdev2->nr_pending);
			} else
				clear_bit(R5_MadeGood, &dev->flags);
		}
		if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
			struct md_rdev *rdev2 = rcu_dereference(
				conf->disks[i].replacement);
			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
				s->handle_bad_blocks = 1;
				atomic_inc(&rdev2->nr_pending);
			} else
				clear_bit(R5_MadeGoodRepl, &dev->flags);
		}
		if (!test_bit(R5_Insync, &dev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (test_bit(R5_ReadError, &dev->flags))
			clear_bit(R5_Insync, &dev->flags);
		if (!test_bit(R5_Insync, &dev->flags)) {
			if (s->failed < 2)
				s->failed_num[s->failed] = i;
			s->failed++;
			if (rdev && !test_bit(Faulty, &rdev->flags))
				do_recovery = 1;
		}
	}
	if (test_bit(STRIPE_SYNCING, &sh->state)) {
		/* If there is a failed device being replaced,
		 *     we must be recovering.
		 * else if we are after recovery_cp, we must be syncing
		 * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
		 * else we can only be replacing
		 * sync and recovery both need to read all devices, and so
		 * use the same flag.
		 */
		if (do_recovery ||
		    sh->sector >= conf->mddev->recovery_cp ||
		    test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
			s->syncing = 1;
		else
			s->replacing = 1;
	}
	rcu_read_unlock();
}
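
/*
 * For example, a device missing from the array entirely (rdev == NULL) and
 * a device with an uncorrected read error both end up counted in s->failed
 * above; handle_stripe() below only distinguishes them later, when deciding
 * whether a ReadError block can simply be rewritten in place.
 */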

static void handle_stripe(struct stripe_head *sh)
{
	struct stripe_head_state s;
	struct r5conf *conf = sh->raid_conf;
	int i;
	int prexor;
	int disks = sh->disks;
	struct r5dev *pdev, *qdev;

	clear_bit(STRIPE_HANDLE, &sh->state);
	if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
		/* already being handled, ensure it gets handled
		 * again when current action finishes */
		set_bit(STRIPE_HANDLE, &sh->state);
		return;
	}

	if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
		set_bit(STRIPE_SYNCING, &sh->state);
		clear_bit(STRIPE_INSYNC, &sh->state);
	}
	clear_bit(STRIPE_DELAYED, &sh->state);
	pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
		 "pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
		 (unsigned long long)sh->sector, sh->state,
		 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
		 sh->check_state, sh->reconstruct_state);

	analyse_stripe(sh, &s);

	if (s.handle_bad_blocks) {
		set_bit(STRIPE_HANDLE, &sh->state);
		goto finish;
	}

	if (unlikely(s.blocked_rdev)) {
		if (s.syncing || s.expanding || s.expanded ||
		    s.replacing || s.to_write || s.written) {
			set_bit(STRIPE_HANDLE, &sh->state);
			goto finish;
		}
		/* There is nothing for the blocked_rdev to block */
		rdev_dec_pending(s.blocked_rdev, conf->mddev);
		s.blocked_rdev = NULL;
	}

	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
	}

	pr_debug("locked=%d uptodate=%d to_read=%d"
		 " to_write=%d failed=%d failed_num=%d,%d\n",
		 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
		 s.failed_num[0], s.failed_num[1]);
	/* check if the array has lost more than max_degraded devices and,
	 * if so, some requests might need to be failed.
	 */
	if (s.failed > conf->max_degraded) {
		sh->check_state = 0;
		sh->reconstruct_state = 0;
		if (s.to_read+s.to_write+s.written)
			handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
		if (s.syncing + s.replacing)
			handle_failed_sync(conf, sh, &s);
	}

	/*
	 * might be able to return some write requests if the parity blocks
	 * are safe, or on a failed drive
	 */
	pdev = &sh->dev[sh->pd_idx];
	s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
		|| (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
	qdev = &sh->dev[sh->qd_idx];
	s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
		|| (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
		|| conf->level < 6;

	if (s.written &&
	    (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
			     && !test_bit(R5_LOCKED, &pdev->flags)
			     && (test_bit(R5_UPTODATE, &pdev->flags) ||
				 test_bit(R5_Discard, &pdev->flags))))) &&
	    (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
			     && !test_bit(R5_LOCKED, &qdev->flags)
			     && (test_bit(R5_UPTODATE, &qdev->flags) ||
				 test_bit(R5_Discard, &qdev->flags))))))
		handle_stripe_clean_event(conf, sh, disks, &s.return_bi);

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (s.to_read || s.non_overwrite
	    || (conf->level == 6 && s.to_write && s.failed)
	    || (s.syncing && (s.uptodate + s.compute < disks))
	    || s.replacing
	    || s.expanding)
		handle_stripe_fill(sh, &s, disks);

	/* Now we check to see if any write operations have recently
	 * completed
	 */
	prexor = 0;
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
		prexor = 1;
	if (sh->reconstruct_state == reconstruct_state_drain_result ||
	    sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
		sh->reconstruct_state = reconstruct_state_idle;

		/* All the 'written' buffers and the parity block are ready to
		 * be written back to disk
		 */
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
		       !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags));
		BUG_ON(sh->qd_idx >= 0 &&
		       !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) &&
		       !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags));
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_bit(R5_LOCKED, &dev->flags) &&
			    (i == sh->pd_idx || i == sh->qd_idx ||
			     dev->written)) {
				pr_debug("Writing block %d\n", i);
				set_bit(R5_Wantwrite, &dev->flags);
				if (prexor)
					continue;
				if (!test_bit(R5_Insync, &dev->flags) ||
				    ((i == sh->pd_idx || i == sh->qd_idx) &&
				     s.failed == 0))
					set_bit(STRIPE_INSYNC, &sh->state);
			}
		}
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			s.dec_preread_active = 1;
	}

	/* Now to consider new write requests and what else, if anything
	 * should be read.  We do not handle new writes when:
	 * 1/ A 'write' operation (copy+xor) is already in flight.
	 * 2/ A 'check' operation is in flight, as it may clobber the parity
	 *    block.
	 */
	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
		handle_stripe_dirtying(conf, sh, &s, disks);

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough
	 * data is available.  The parity check is held off while parity
	 * dependent operations are in flight.
	 */
	if (sh->check_state ||
	    (s.syncing && s.locked == 0 &&
	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
	     !test_bit(STRIPE_INSYNC, &sh->state))) {
		if (conf->level == 6)
			handle_parity_checks6(conf, sh, &s, disks);
		else
			handle_parity_checks5(conf, sh, &s, disks);
	}

	if (s.replacing && s.locked == 0
	    && !test_bit(STRIPE_INSYNC, &sh->state)) {
		/* Write out to replacement devices where possible */
		for (i = 0; i < conf->raid_disks; i++)
			if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
			    test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
				set_bit(R5_WantReplace, &sh->dev[i].flags);
				set_bit(R5_LOCKED, &sh->dev[i].flags);
				s.locked++;
			}
		set_bit(STRIPE_INSYNC, &sh->state);
	}
	if ((s.syncing || s.replacing) && s.locked == 0 &&
	    test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drives are just a ReadError, then we might need
	 * to progress the repair/check process
	 */
	if (s.failed <= conf->max_degraded && !conf->mddev->ro)
		for (i = 0; i < s.failed; i++) {
			struct r5dev *dev = &sh->dev[s.failed_num[i]];
			if (test_bit(R5_ReadError, &dev->flags)
			    && !test_bit(R5_LOCKED, &dev->flags)
			    && test_bit(R5_UPTODATE, &dev->flags)) {
				if (!test_bit(R5_ReWrite, &dev->flags)) {
					set_bit(R5_Wantwrite, &dev->flags);
					set_bit(R5_ReWrite, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					s.locked++;
				} else {
					/* let's read it back */
					set_bit(R5_Wantread, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					s.locked++;
				}
			}
		}

	/* Finish reconstruct operations initiated by the expansion process */
	if (sh->reconstruct_state == reconstruct_state_result) {
		struct stripe_head *sh_src
			= get_active_stripe(conf, sh->sector, 1, 1, 1);
		if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
			/* sh cannot be written until sh_src has been read.
			 * so arrange for sh to be delayed a little
			 */
			set_bit(STRIPE_DELAYED, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
					      &sh_src->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh_src);
			goto finish;
		}
		if (sh_src)
			release_stripe(sh_src);

		sh->reconstruct_state = reconstruct_state_idle;
		clear_bit(STRIPE_EXPANDING, &sh->state);
		for (i = conf->raid_disks; i--; ) {
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
			set_bit(R5_LOCKED, &sh->dev[i].flags);
			s.locked++;
		}
	}

	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
	    !sh->reconstruct_state) {
		/* Need to write out all blocks after computing parity */
		sh->disks = conf->raid_disks;
		stripe_set_idx(sh->sector, conf, 0, sh);
		schedule_reconstruction(sh, &s, 1, 1);
	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
	}

	if (s.expanding && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
		handle_stripe_expansion(conf, sh);

finish:
	/* wait for this device to become unblocked */
	if (unlikely(s.blocked_rdev)) {
		if (conf->mddev->external)
			md_wait_for_blocked_rdev(s.blocked_rdev,
						 conf->mddev);
		else
			/* Internal metadata will immediately
			 * be written by raid5d, so we don't
			 * need to wait here.
			 */
			rdev_dec_pending(s.blocked_rdev,
					 conf->mddev);
	}

	if (s.handle_bad_blocks)
		for (i = disks; i--; ) {
			struct md_rdev *rdev;
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
				/* We own a safe reference to the rdev */
				rdev = conf->disks[i].rdev;
				if (!rdev_set_badblocks(rdev, sh->sector,
							STRIPE_SECTORS, 0))
					md_error(conf->mddev, rdev);
				rdev_dec_pending(rdev, conf->mddev);
			}
			if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
				rdev = conf->disks[i].rdev;
				rdev_clear_badblocks(rdev, sh->sector,
						     STRIPE_SECTORS, 0);
				rdev_dec_pending(rdev, conf->mddev);
			}
			if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
				rdev = conf->disks[i].replacement;
				if (!rdev)
					/* rdev has been moved down */
					rdev = conf->disks[i].rdev;
				rdev_clear_badblocks(rdev, sh->sector,
						     STRIPE_SECTORS, 0);
				rdev_dec_pending(rdev, conf->mddev);
			}
		}

	if (s.ops_request)
		raid_run_ops(sh, s.ops_request);

	ops_run_io(sh, &s);

	if (s.dec_preread_active) {
		/* We delay this until after ops_run_io so that if make_request
		 * is waiting on a flush, it won't continue until the writes
		 * have actually been submitted.
		 */
		atomic_dec(&conf->preread_active_stripes);
		if (atomic_read(&conf->preread_active_stripes) <
		    IO_THRESHOLD)
			md_wakeup_thread(conf->mddev->thread);
	}

	return_io(s.return_bi);

	clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
}
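
/*
 * The ordering inside handle_stripe() is deliberate: analyse_stripe() takes
 * a consistent snapshot first, then completed writes are returned, reads
 * scheduled, new writes considered and parity checked, with the actual
 * ops_request and IO submission done only once at the end.  Each pass is
 * non-blocking; anything that cannot proceed yet simply sets STRIPE_HANDLE
 * so the stripe is revisited.
 */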

static void raid5_activate_delayed(struct r5conf *conf)
{
	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
		while (!list_empty(&conf->delayed_list)) {
			struct list_head *l = conf->delayed_list.next;
			struct stripe_head *sh;
			sh = list_entry(l, struct stripe_head, lru);
			list_del_init(l);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			list_add_tail(&sh->lru, &conf->hold_list);
		}
	}
}

static void activate_bit_delay(struct r5conf *conf)
{
	/* device_lock is held */
	struct list_head head;
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		atomic_inc(&sh->count);
		__release_stripe(conf, sh);
	}
}

int md_raid5_congested(struct mddev *mddev, int bits)
{
	struct r5conf *conf = mddev->private;

	/* No difference between reads and writes.  Just check
	 * how busy the stripe_cache is
	 */

	if (conf->inactive_blocked)
		return 1;
	if (conf->quiesce)
		return 1;
	if (list_empty_careful(&conf->inactive_list))
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(md_raid5_congested);

static int raid5_congested(void *data, int bits)
{
	struct mddev *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid5_congested(mddev, bits);
}

/* We want read requests to align with chunks where possible,
 * but write requests don't need to.
 */
static int raid5_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	if ((bvm->bi_rw & 1) == WRITE)
		return biovec->bv_len; /* always allow writes to be mergeable */

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0)
		max = 0;
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
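
/*
 * Both the merge callback above and in_chunk_boundary() below rely on
 * chunk_sectors being a power of two, so 'sector & (chunk_sectors - 1)' is
 * the offset within the chunk.  For example, with 64KiB chunks
 * (chunk_sectors == 128) a read starting 120 sectors into a chunk can grow
 * by at most 8 sectors (4KiB) before it would straddle the boundary.
 */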

static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bio->bi_size >> 9;

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	return chunk_sectors >=
		((sector & (chunk_sectors - 1)) + bio_sectors);
}

/*
 * add bio to the retry LIFO ( in O(1) ... we are in interrupt )
 * later sampled by raid5d.
 */
static void add_bio_to_retry(struct bio *bi, struct r5conf *conf)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	bi->bi_next = conf->retry_read_aligned_list;
	conf->retry_read_aligned_list = bi;

	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(conf->mddev->thread);
}

static struct bio *remove_bio_from_retry(struct r5conf *conf)
{
	struct bio *bi;

	bi = conf->retry_read_aligned;
	if (bi) {
		conf->retry_read_aligned = NULL;
		return bi;
	}
	bi = conf->retry_read_aligned_list;
	if (bi) {
		conf->retry_read_aligned_list = bi->bi_next;
		bi->bi_next = NULL;
		/*
		 * this sets the active strip count to 1 and the processed
		 * strip count to zero (upper 8 bits)
		 */
		raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
	}

	return bi;
}

/*
 * The "raid5_align_endio" should check if the read succeeded and if it
 * did, call bio_endio on the original bio (having bio_put the new bio
 * first).
 * If the read failed..
 */
static void raid5_align_endio(struct bio *bi, int error)
{
	struct bio *raid_bi = bi->bi_private;
	struct mddev *mddev;
	struct r5conf *conf;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	struct md_rdev *rdev;

	bio_put(bi);

	rdev = (void *)raid_bi->bi_next;
	raid_bi->bi_next = NULL;
	mddev = rdev->mddev;
	conf = mddev->private;

	rdev_dec_pending(rdev, conf->mddev);

	if (!error && uptodate) {
		bio_endio(raid_bi, 0);
		if (atomic_dec_and_test(&conf->active_aligned_reads))
			wake_up(&conf->wait_for_stripe);
		return;
	}

	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");

	add_bio_to_retry(raid_bi, conf);
}

static int bio_fits_rdev(struct bio *bi)
{
	struct request_queue *q = bdev_get_queue(bi->bi_bdev);

	if ((bi->bi_size >> 9) > queue_max_sectors(q))
		return 0;
	blk_recount_segments(q, bi);
	if (bi->bi_phys_segments > queue_max_segments(q))
		return 0;

	if (q->merge_bvec_fn)
		/* it's too hard to apply the merge_bvec_fn at this stage,
		 * just give up
		 */
		return 0;

	return 1;
}

static int chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
{
	struct r5conf *conf = mddev->private;
	int dd_idx;
	struct bio *align_bi;
	struct md_rdev *rdev;
	sector_t end_sector;

	if (!in_chunk_boundary(mddev, raid_bio)) {
		pr_debug("chunk_aligned_read : non aligned\n");
		return 0;
	}
	/*
	 * use bio_clone_mddev to make a copy of the bio
	 */
	align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
	if (!align_bi)
		return 0;
	/*
	 * set bi_end_io to a new function, and set bi_private to the
	 * original bio.
	 */
	align_bi->bi_end_io = raid5_align_endio;
	align_bi->bi_private = raid_bio;
	/*
	 * compute position
	 */
	align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
						   0,
						   &dd_idx, NULL);

	end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
	rcu_read_lock();
	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
	if (!rdev || test_bit(Faulty, &rdev->flags) ||
	    rdev->recovery_offset < end_sector) {
		rdev = rcu_dereference(conf->disks[dd_idx].rdev);
		if (rdev &&
		    (test_bit(Faulty, &rdev->flags) ||
		     !(test_bit(In_sync, &rdev->flags) ||
		       rdev->recovery_offset >= end_sector)))
			rdev = NULL;
	}
	if (rdev) {
		sector_t first_bad;
		int bad_sectors;

		atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();
		raid_bio->bi_next = (void *)rdev;
		align_bi->bi_bdev = rdev->bdev;
		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);

		if (!bio_fits_rdev(align_bi) ||
		    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
				&first_bad, &bad_sectors)) {
			/* too big in some way, or has a known bad block */
			bio_put(align_bi);
			rdev_dec_pending(rdev, mddev);
			return 0;
		}

		/* No reshape active, so we can trust rdev->data_offset */
		align_bi->bi_sector += rdev->data_offset;

		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		atomic_inc(&conf->active_aligned_reads);
		spin_unlock_irq(&conf->device_lock);

		generic_make_request(align_bi);
		return 1;
	} else {
		rcu_read_unlock();
		bio_put(align_bi);
		return 0;
	}
}

/* __get_priority_stripe - get the next stripe to process
 *
 * Full stripe writes are allowed to pass preread active stripes up until
 * the bypass_threshold is exceeded. In general the bypass_count
 * increments when the handle_list is handled before the hold_list; however, it
 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
 * stripe with in flight i/o. The bypass_count will be reset when the
 * head of the hold_list has changed, i.e. the head was promoted to the
 * handle_list.
 */
static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
{
	struct stripe_head *sh;

	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
		 __func__,
		 list_empty(&conf->handle_list) ? "empty" : "busy",
		 list_empty(&conf->hold_list) ? "empty" : "busy",
		 atomic_read(&conf->pending_full_writes), conf->bypass_count);

	if (!list_empty(&conf->handle_list)) {
		sh = list_entry(conf->handle_list.next, typeof(*sh), lru);

		if (list_empty(&conf->hold_list))
			conf->bypass_count = 0;
		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
			if (conf->hold_list.next == conf->last_hold)
				conf->bypass_count++;
			else {
				conf->last_hold = conf->hold_list.next;
				conf->bypass_count -= conf->bypass_threshold;
				if (conf->bypass_count < 0)
					conf->bypass_count = 0;
			}
		}
	} else if (!list_empty(&conf->hold_list) &&
		   ((conf->bypass_threshold &&
		     conf->bypass_count > conf->bypass_threshold) ||
		    atomic_read(&conf->pending_full_writes) == 0)) {
		sh = list_entry(conf->hold_list.next,
				typeof(*sh), lru);
		conf->bypass_count -= conf->bypass_threshold;
		if (conf->bypass_count < 0)
			conf->bypass_count = 0;
	} else
		return NULL;

	list_del_init(&sh->lru);
	atomic_inc(&sh->count);
	BUG_ON(atomic_read(&sh->count) != 1);
	return sh;
}
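
/*
 * Example of the bypass accounting above (illustrative): with
 * bypass_threshold == 1, servicing handle_list twice while the same stripe
 * sits idle at the head of hold_list pushes bypass_count past the
 * threshold, so a later pass with an empty handle_list promotes the preread
 * stripe from hold_list instead of starving it indefinitely.
 */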

struct raid5_plug_cb {
	struct blk_plug_cb	cb;
	struct list_head	list;
};

static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
{
	struct raid5_plug_cb *cb = container_of(
		blk_cb, struct raid5_plug_cb, cb);
	struct stripe_head *sh;
	struct mddev *mddev = cb->cb.data;
	struct r5conf *conf = mddev->private;

	if (cb->list.next && !list_empty(&cb->list)) {
		spin_lock_irq(&conf->device_lock);
		while (!list_empty(&cb->list)) {
			sh = list_first_entry(&cb->list, struct stripe_head, lru);
			list_del_init(&sh->lru);
			/*
			 * avoid the race where release_stripe_plug() sees
			 * STRIPE_ON_UNPLUG_LIST clear but the stripe
			 * is still in our list
			 */
			smp_mb__before_clear_bit();
			clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
			__release_stripe(conf, sh);
		}
		spin_unlock_irq(&conf->device_lock);
	}
	kfree(cb);
}

static void release_stripe_plug(struct mddev *mddev,
				struct stripe_head *sh)
{
	struct blk_plug_cb *blk_cb = blk_check_plugged(
		raid5_unplug, mddev,
		sizeof(struct raid5_plug_cb));
	struct raid5_plug_cb *cb;

	if (!blk_cb) {
		release_stripe(sh);
		return;
	}

	cb = container_of(blk_cb, struct raid5_plug_cb, cb);

	if (cb->list.next == NULL)
		INIT_LIST_HEAD(&cb->list);

	if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
		list_add_tail(&sh->lru, &cb->list);
	else
		release_stripe(sh);
}

static void make_discard_request(struct mddev *mddev, struct bio *bi)
{
	struct r5conf *conf = mddev->private;
	sector_t logical_sector, last_sector;
	struct stripe_head *sh;
	int remaining;
	int stripe_sectors;

	if (mddev->reshape_position != MaxSector)
		/* Skip discard while reshape is happening */
		return;

	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	last_sector = bi->bi_sector + (bi->bi_size>>9);

	bi->bi_next = NULL;
	bi->bi_phys_segments = 1; /* over-loaded to count active stripes */

	stripe_sectors = conf->chunk_sectors *
		(conf->raid_disks - conf->max_degraded);
	logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector,
					       stripe_sectors);
	sector_div(last_sector, stripe_sectors);

	logical_sector *= conf->chunk_sectors;
	last_sector *= conf->chunk_sectors;

	for (; logical_sector < last_sector;
	     logical_sector += STRIPE_SECTORS) {
		DEFINE_WAIT(w);
		int d;
	again:
		sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
		prepare_to_wait(&conf->wait_for_overlap, &w,
				TASK_UNINTERRUPTIBLE);
		spin_lock_irq(&sh->stripe_lock);
		for (d = 0; d < conf->raid_disks; d++) {
			if (d == sh->pd_idx || d == sh->qd_idx)
				continue;
			if (sh->dev[d].towrite || sh->dev[d].toread) {
				set_bit(R5_Overlap, &sh->dev[d].flags);
				spin_unlock_irq(&sh->stripe_lock);
				release_stripe(sh);
				schedule();
				goto again;
			}
		}
		finish_wait(&conf->wait_for_overlap, &w);
		for (d = 0; d < conf->raid_disks; d++) {
			if (d == sh->pd_idx || d == sh->qd_idx)
				continue;
			sh->dev[d].towrite = bi;
			set_bit(R5_OVERWRITE, &sh->dev[d].flags);
			raid5_inc_bi_active_stripes(bi);
		}
		spin_unlock_irq(&sh->stripe_lock);
		if (conf->mddev->bitmap) {
			for (d = 0;
			     d < conf->raid_disks - conf->max_degraded;
			     d++)
				bitmap_startwrite(mddev->bitmap,
						  sh->sector,
						  STRIPE_SECTORS,
						  0);
			sh->bm_seq = conf->seq_flush + 1;
			set_bit(STRIPE_BIT_DELAY, &sh->state);
		}

		set_bit(STRIPE_HANDLE, &sh->state);
		clear_bit(STRIPE_DELAYED, &sh->state);
		if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			atomic_inc(&conf->preread_active_stripes);
		release_stripe_plug(mddev, sh);
	}

	remaining = raid5_dec_bi_active_stripes(bi);
	if (remaining == 0) {
		md_write_end(mddev);
		bio_endio(bi, 0);
	}
}
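
/*
 * Discard granularity example (illustrative numbers): with chunk_sectors ==
 * 128 on a 5-device RAID5, stripe_sectors == 128 * 4 == 512, so the bio's
 * range is rounded inward to whole stripes (start rounded up, end rounded
 * down) and any partial stripes at the edges are left alone.
 */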

static void make_request(struct mddev *mddev, struct bio *bi)
{
	struct r5conf *conf = mddev->private;
	int dd_idx;
	sector_t new_sector;
	sector_t logical_sector, last_sector;
	struct stripe_head *sh;
	const int rw = bio_data_dir(bi);
	int remaining;

	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bi);
		return;
	}

	md_write_start(mddev, bi);

	if (rw == READ &&
	    mddev->reshape_position == MaxSector &&
	    chunk_aligned_read(mddev, bi))
		return;

	if (unlikely(bi->bi_rw & REQ_DISCARD)) {
		make_discard_request(mddev, bi);
		return;
	}

	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	last_sector = bi->bi_sector + (bi->bi_size>>9);
	bi->bi_next = NULL;
	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */

	for (; logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
		DEFINE_WAIT(w);
		int previous;

	retry:
		previous = 0;
		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
		if (unlikely(conf->reshape_progress != MaxSector)) {
			/* spinlock is needed as reshape_progress may be
			 * 64bit on a 32bit platform, and so it might be
			 * possible to see a half-updated value
			 * Of course reshape_progress could change after
			 * the lock is dropped, so once we get a reference
			 * to the stripe that we think it is, we will have
			 * to check again.
			 */
			spin_lock_irq(&conf->device_lock);
			if (mddev->reshape_backwards
			    ? logical_sector < conf->reshape_progress
			    : logical_sector >= conf->reshape_progress) {
				previous = 1;
			} else {
				if (mddev->reshape_backwards
				    ? logical_sector < conf->reshape_safe
				    : logical_sector >= conf->reshape_safe) {
					spin_unlock_irq(&conf->device_lock);
					schedule();
					goto retry;
				}
			}
			spin_unlock_irq(&conf->device_lock);
		}

		new_sector = raid5_compute_sector(conf, logical_sector,
						  previous,
						  &dd_idx, NULL);
		pr_debug("raid456: make_request, sector %llu logical %llu\n",
			 (unsigned long long)new_sector,
			 (unsigned long long)logical_sector);

		sh = get_active_stripe(conf, new_sector, previous,
				       (bi->bi_rw & RWA_MASK), 0);
		if (sh) {
			if (unlikely(previous)) {
				/* expansion might have moved on while waiting for a
				 * stripe, so we must do the range check again.
				 * Expansion could still move past after this
				 * test, but as we are holding a reference to
				 * 'sh', we know that if that happens,
				 * STRIPE_EXPANDING will get set and the expansion
				 * won't proceed until we finish with the stripe.
				 */
				int must_retry = 0;
				spin_lock_irq(&conf->device_lock);
				if (mddev->reshape_backwards
				    ? logical_sector >= conf->reshape_progress
				    : logical_sector < conf->reshape_progress)
					/* mismatch, need to try again */
					must_retry = 1;
				spin_unlock_irq(&conf->device_lock);
				if (must_retry) {
					release_stripe(sh);
					schedule();
					goto retry;
				}
			}

			if (rw == WRITE &&
			    logical_sector >= mddev->suspend_lo &&
			    logical_sector < mddev->suspend_hi) {
				release_stripe(sh);
				/* As the suspend_* range is controlled by
				 * userspace, we want an interruptible
				 * wait.
				 */
				flush_signals(current);
				prepare_to_wait(&conf->wait_for_overlap,
						&w, TASK_INTERRUPTIBLE);
				if (logical_sector >= mddev->suspend_lo &&
				    logical_sector < mddev->suspend_hi)
					schedule();
				goto retry;
			}

			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
			    !add_stripe_bio(sh, bi, dd_idx, rw)) {
				/* Stripe is busy expanding or
				 * add failed due to overlap.  Flush everything
				 * and wait a while
				 */
				md_wakeup_thread(mddev->thread);
				release_stripe(sh);
				schedule();
				goto retry;
			}
			finish_wait(&conf->wait_for_overlap, &w);
			set_bit(STRIPE_HANDLE, &sh->state);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if ((bi->bi_rw & REQ_SYNC) &&
			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe_plug(mddev, sh);
		} else {
			/* cannot get stripe for read-ahead, just give-up */
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			finish_wait(&conf->wait_for_overlap, &w);
			break;
		}
	}

	remaining = raid5_dec_bi_active_stripes(bi);
	if (remaining == 0) {
		if (rw == WRITE)
			md_write_end(mddev);

		bio_endio(bi, 0);
	}
}
  3902. static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
  3903. static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
  3904. {
  3905. /* reshaping is quite different to recovery/resync so it is
  3906. * handled quite separately ... here.
  3907. *
  3908. * On each call to sync_request, we gather one chunk worth of
  3909. * destination stripes and flag them as expanding.
  3910. * Then we find all the source stripes and request reads.
  3911. * As the reads complete, handle_stripe will copy the data
  3912. * into the destination stripe and release that stripe.
  3913. */
  3914. struct r5conf *conf = mddev->private;
  3915. struct stripe_head *sh;
  3916. sector_t first_sector, last_sector;
  3917. int raid_disks = conf->previous_raid_disks;
  3918. int data_disks = raid_disks - conf->max_degraded;
  3919. int new_data_disks = conf->raid_disks - conf->max_degraded;
  3920. int i;
  3921. int dd_idx;
  3922. sector_t writepos, readpos, safepos;
  3923. sector_t stripe_addr;
  3924. int reshape_sectors;
  3925. struct list_head stripes;
  3926. if (sector_nr == 0) {
  3927. /* If restarting in the middle, skip the initial sectors */
  3928. if (mddev->reshape_backwards &&
  3929. conf->reshape_progress < raid5_size(mddev, 0, 0)) {
  3930. sector_nr = raid5_size(mddev, 0, 0)
  3931. - conf->reshape_progress;
  3932. } else if (!mddev->reshape_backwards &&
  3933. conf->reshape_progress > 0)
  3934. sector_nr = conf->reshape_progress;
  3935. sector_div(sector_nr, new_data_disks);
  3936. if (sector_nr) {
  3937. mddev->curr_resync_completed = sector_nr;
  3938. sysfs_notify(&mddev->kobj, NULL, "sync_completed");
  3939. *skipped = 1;
  3940. return sector_nr;
  3941. }
  3942. }
  3943. /* We need to process a full chunk at a time.
  3944. * If old and new chunk sizes differ, we need to process the
  3945. * largest of these
  3946. */
  3947. if (mddev->new_chunk_sectors > mddev->chunk_sectors)
  3948. reshape_sectors = mddev->new_chunk_sectors;
  3949. else
  3950. reshape_sectors = mddev->chunk_sectors;
  3951. /* We update the metadata at least every 10 seconds, or when
  3952. * the data about to be copied would over-write the source of
3953. * the data at the front of the range, i.e. when the point one
3954. * new stripe along from reshape_progress (in the new layout) maps
3955. * to a device address beyond where reshape_safe maps in the old layout.
  3956. */
  3957. writepos = conf->reshape_progress;
  3958. sector_div(writepos, new_data_disks);
  3959. readpos = conf->reshape_progress;
  3960. sector_div(readpos, data_disks);
  3961. safepos = conf->reshape_safe;
  3962. sector_div(safepos, data_disks);
  3963. if (mddev->reshape_backwards) {
  3964. writepos -= min_t(sector_t, reshape_sectors, writepos);
  3965. readpos += reshape_sectors;
  3966. safepos += reshape_sectors;
  3967. } else {
  3968. writepos += reshape_sectors;
  3969. readpos -= min_t(sector_t, reshape_sectors, readpos);
  3970. safepos -= min_t(sector_t, reshape_sectors, safepos);
  3971. }
  3972. /* Having calculated the 'writepos' possibly use it
  3973. * to set 'stripe_addr' which is where we will write to.
  3974. */
  3975. if (mddev->reshape_backwards) {
  3976. BUG_ON(conf->reshape_progress == 0);
  3977. stripe_addr = writepos;
  3978. BUG_ON((mddev->dev_sectors &
  3979. ~((sector_t)reshape_sectors - 1))
  3980. - reshape_sectors - stripe_addr
  3981. != sector_nr);
  3982. } else {
  3983. BUG_ON(writepos != sector_nr + reshape_sectors);
  3984. stripe_addr = sector_nr;
  3985. }
  3986. /* 'writepos' is the most advanced device address we might write.
  3987. * 'readpos' is the least advanced device address we might read.
  3988. * 'safepos' is the least address recorded in the metadata as having
  3989. * been reshaped.
  3990. * If there is a min_offset_diff, these are adjusted either by
  3991. * increasing the safepos/readpos if diff is negative, or
  3992. * increasing writepos if diff is positive.
  3993. * If 'readpos' is then behind 'writepos', there is no way that we can
  3994. * ensure safety in the face of a crash - that must be done by userspace
  3995. * making a backup of the data. So in that case there is no particular
  3996. * rush to update metadata.
  3997. * Otherwise if 'safepos' is behind 'writepos', then we really need to
  3998. * update the metadata to advance 'safepos' to match 'readpos' so that
  3999. * we can be safe in the event of a crash.
  4000. * So we insist on updating metadata if safepos is behind writepos and
  4001. * readpos is beyond writepos.
  4002. * In any case, update the metadata every 10 seconds.
  4003. * Maybe that number should be configurable, but I'm not sure it is
  4004. * worth it.... maybe it could be a multiple of safemode_delay???
  4005. */
  4006. if (conf->min_offset_diff < 0) {
  4007. safepos += -conf->min_offset_diff;
  4008. readpos += -conf->min_offset_diff;
  4009. } else
  4010. writepos += conf->min_offset_diff;
  4011. if ((mddev->reshape_backwards
  4012. ? (safepos > writepos && readpos < writepos)
  4013. : (safepos < writepos && readpos > writepos)) ||
  4014. time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
  4015. /* Cannot proceed until we've updated the superblock... */
  4016. wait_event(conf->wait_for_overlap,
  4017. atomic_read(&conf->reshape_stripes)==0);
  4018. mddev->reshape_position = conf->reshape_progress;
  4019. mddev->curr_resync_completed = sector_nr;
  4020. conf->reshape_checkpoint = jiffies;
  4021. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  4022. md_wakeup_thread(mddev->thread);
  4023. wait_event(mddev->sb_wait, mddev->flags == 0 ||
  4024. kthread_should_stop());
  4025. spin_lock_irq(&conf->device_lock);
  4026. conf->reshape_safe = mddev->reshape_position;
  4027. spin_unlock_irq(&conf->device_lock);
  4028. wake_up(&conf->wait_for_overlap);
  4029. sysfs_notify(&mddev->kobj, NULL, "sync_completed");
  4030. }
  4031. INIT_LIST_HEAD(&stripes);
  4032. for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
  4033. int j;
  4034. int skipped_disk = 0;
  4035. sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
  4036. set_bit(STRIPE_EXPANDING, &sh->state);
  4037. atomic_inc(&conf->reshape_stripes);
  4038. /* If any of this stripe is beyond the end of the old
  4039. * array, then we need to zero those blocks
  4040. */
  4041. for (j=sh->disks; j--;) {
  4042. sector_t s;
  4043. if (j == sh->pd_idx)
  4044. continue;
  4045. if (conf->level == 6 &&
  4046. j == sh->qd_idx)
  4047. continue;
  4048. s = compute_blocknr(sh, j, 0);
  4049. if (s < raid5_size(mddev, 0, 0)) {
  4050. skipped_disk = 1;
  4051. continue;
  4052. }
  4053. memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
  4054. set_bit(R5_Expanded, &sh->dev[j].flags);
  4055. set_bit(R5_UPTODATE, &sh->dev[j].flags);
  4056. }
  4057. if (!skipped_disk) {
  4058. set_bit(STRIPE_EXPAND_READY, &sh->state);
  4059. set_bit(STRIPE_HANDLE, &sh->state);
  4060. }
  4061. list_add(&sh->lru, &stripes);
  4062. }
  4063. spin_lock_irq(&conf->device_lock);
  4064. if (mddev->reshape_backwards)
  4065. conf->reshape_progress -= reshape_sectors * new_data_disks;
  4066. else
  4067. conf->reshape_progress += reshape_sectors * new_data_disks;
  4068. spin_unlock_irq(&conf->device_lock);
4069. /* Ok, those stripes are ready. We can start scheduling
  4070. * reads on the source stripes.
  4071. * The source stripes are determined by mapping the first and last
  4072. * block on the destination stripes.
  4073. */
  4074. first_sector =
  4075. raid5_compute_sector(conf, stripe_addr*(new_data_disks),
  4076. 1, &dd_idx, NULL);
  4077. last_sector =
  4078. raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
  4079. * new_data_disks - 1),
  4080. 1, &dd_idx, NULL);
  4081. if (last_sector >= mddev->dev_sectors)
  4082. last_sector = mddev->dev_sectors - 1;
  4083. while (first_sector <= last_sector) {
  4084. sh = get_active_stripe(conf, first_sector, 1, 0, 1);
  4085. set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
  4086. set_bit(STRIPE_HANDLE, &sh->state);
  4087. release_stripe(sh);
  4088. first_sector += STRIPE_SECTORS;
  4089. }
  4090. /* Now that the sources are clearly marked, we can release
  4091. * the destination stripes
  4092. */
  4093. while (!list_empty(&stripes)) {
  4094. sh = list_entry(stripes.next, struct stripe_head, lru);
  4095. list_del_init(&sh->lru);
  4096. release_stripe(sh);
  4097. }
  4098. /* If this takes us to the resync_max point where we have to pause,
  4099. * then we need to write out the superblock.
  4100. */
  4101. sector_nr += reshape_sectors;
  4102. if ((sector_nr - mddev->curr_resync_completed) * 2
  4103. >= mddev->resync_max - mddev->curr_resync_completed) {
  4104. /* Cannot proceed until we've updated the superblock... */
  4105. wait_event(conf->wait_for_overlap,
  4106. atomic_read(&conf->reshape_stripes) == 0);
  4107. mddev->reshape_position = conf->reshape_progress;
  4108. mddev->curr_resync_completed = sector_nr;
  4109. conf->reshape_checkpoint = jiffies;
  4110. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  4111. md_wakeup_thread(mddev->thread);
  4112. wait_event(mddev->sb_wait,
  4113. !test_bit(MD_CHANGE_DEVS, &mddev->flags)
  4114. || kthread_should_stop());
  4115. spin_lock_irq(&conf->device_lock);
  4116. conf->reshape_safe = mddev->reshape_position;
  4117. spin_unlock_irq(&conf->device_lock);
  4118. wake_up(&conf->wait_for_overlap);
  4119. sysfs_notify(&mddev->kobj, NULL, "sync_completed");
  4120. }
  4121. return reshape_sectors;
  4122. }
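/* Worked example of the progress accounting above (illustrative
* numbers): growing a 4-disk raid5 to 5 disks with a 64K chunk (128
* sectors). reshape_sectors = 128 and new_data_disks = 5 - 1 = 4, so
* each call advances conf->reshape_progress (an array address) by
* 128 * 4 = 512 sectors while returning 128, because the caller counts
* progress in per-device sectors.
*/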
  4123. /* FIXME go_faster isn't used */
  4124. static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
  4125. {
  4126. struct r5conf *conf = mddev->private;
  4127. struct stripe_head *sh;
  4128. sector_t max_sector = mddev->dev_sectors;
  4129. sector_t sync_blocks;
  4130. int still_degraded = 0;
  4131. int i;
  4132. if (sector_nr >= max_sector) {
  4133. /* just being told to finish up .. nothing much to do */
  4134. if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
  4135. end_reshape(conf);
  4136. return 0;
  4137. }
  4138. if (mddev->curr_resync < max_sector) /* aborted */
  4139. bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
  4140. &sync_blocks, 1);
  4141. else /* completed sync */
  4142. conf->fullsync = 0;
  4143. bitmap_close_sync(mddev->bitmap);
  4144. return 0;
  4145. }
  4146. /* Allow raid5_quiesce to complete */
  4147. wait_event(conf->wait_for_overlap, conf->quiesce != 2);
  4148. if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
  4149. return reshape_request(mddev, sector_nr, skipped);
  4150. /* No need to check resync_max as we never do more than one
  4151. * stripe, and as resync_max will always be on a chunk boundary,
  4152. * if the check in md_do_sync didn't fire, there is no chance
  4153. * of overstepping resync_max here
  4154. */
4155. /* if there are too many failed drives and we are trying
  4156. * to resync, then assert that we are finished, because there is
  4157. * nothing we can do.
  4158. */
  4159. if (mddev->degraded >= conf->max_degraded &&
  4160. test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
  4161. sector_t rv = mddev->dev_sectors - sector_nr;
  4162. *skipped = 1;
  4163. return rv;
  4164. }
  4165. if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
  4166. !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
  4167. !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
  4168. /* we can skip this block, and probably more */
  4169. sync_blocks /= STRIPE_SECTORS;
  4170. *skipped = 1;
  4171. return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
  4172. }
  4173. bitmap_cond_end_sync(mddev->bitmap, sector_nr);
  4174. sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
  4175. if (sh == NULL) {
  4176. sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
  4177. /* make sure we don't swamp the stripe cache if someone else
  4178. * is trying to get access
  4179. */
  4180. schedule_timeout_uninterruptible(1);
  4181. }
  4182. /* Need to check if array will still be degraded after recovery/resync
  4183. * We don't need to check the 'failed' flag as when that gets set,
  4184. * recovery aborts.
  4185. */
  4186. for (i = 0; i < conf->raid_disks; i++)
  4187. if (conf->disks[i].rdev == NULL)
  4188. still_degraded = 1;
  4189. bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
  4190. set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
  4191. handle_stripe(sh);
  4192. release_stripe(sh);
  4193. return STRIPE_SECTORS;
  4194. }
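/* Contract sketch for the return value above: like reshape_request(),
* sync_request() returns how many device sectors were handled this
* call - STRIPE_SECTORS for one stripe, or the whole remaining range
* with *skipped = 1 when the bitmap says nothing needs syncing.
*/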
  4195. static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
  4196. {
  4197. /* We may not be able to submit a whole bio at once as there
  4198. * may not be enough stripe_heads available.
  4199. * We cannot pre-allocate enough stripe_heads as we may need
4200. * more than exist in the cache (if we allow ever larger chunks).
  4201. * So we do one stripe head at a time and record in
  4202. * ->bi_hw_segments how many have been done.
  4203. *
  4204. * We *know* that this entire raid_bio is in one chunk, so
4205. * there will be only one 'dd_idx' and we need only one call to raid5_compute_sector.
  4206. */
  4207. struct stripe_head *sh;
  4208. int dd_idx;
  4209. sector_t sector, logical_sector, last_sector;
  4210. int scnt = 0;
  4211. int remaining;
  4212. int handled = 0;
  4213. logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
  4214. sector = raid5_compute_sector(conf, logical_sector,
  4215. 0, &dd_idx, NULL);
  4216. last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
  4217. for (; logical_sector < last_sector;
  4218. logical_sector += STRIPE_SECTORS,
  4219. sector += STRIPE_SECTORS,
  4220. scnt++) {
  4221. if (scnt < raid5_bi_processed_stripes(raid_bio))
  4222. /* already done this stripe */
  4223. continue;
  4224. sh = get_active_stripe(conf, sector, 0, 1, 0);
  4225. if (!sh) {
  4226. /* failed to get a stripe - must wait */
  4227. raid5_set_bi_processed_stripes(raid_bio, scnt);
  4228. conf->retry_read_aligned = raid_bio;
  4229. return handled;
  4230. }
  4231. if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
  4232. release_stripe(sh);
  4233. raid5_set_bi_processed_stripes(raid_bio, scnt);
  4234. conf->retry_read_aligned = raid_bio;
  4235. return handled;
  4236. }
  4237. set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
  4238. handle_stripe(sh);
  4239. release_stripe(sh);
  4240. handled++;
  4241. }
  4242. remaining = raid5_dec_bi_active_stripes(raid_bio);
  4243. if (remaining == 0)
  4244. bio_endio(raid_bio, 0);
  4245. if (atomic_dec_and_test(&conf->active_aligned_reads))
  4246. wake_up(&conf->wait_for_stripe);
  4247. return handled;
  4248. }
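/* Resume bookkeeping sketch (illustrative): if stripe 3 of an aligned
* read cannot get a stripe_head, raid5_set_bi_processed_stripes()
* records 3 and the bio is parked on conf->retry_read_aligned; when
* raid5d retries it, the scnt < raid5_bi_processed_stripes() test
* above skips stripes 0-2 and the read resumes at stripe 3.
*/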
  4249. #define MAX_STRIPE_BATCH 8
  4250. static int handle_active_stripes(struct r5conf *conf)
  4251. {
  4252. struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
  4253. int i, batch_size = 0;
  4254. while (batch_size < MAX_STRIPE_BATCH &&
  4255. (sh = __get_priority_stripe(conf)) != NULL)
  4256. batch[batch_size++] = sh;
  4257. if (batch_size == 0)
  4258. return batch_size;
  4259. spin_unlock_irq(&conf->device_lock);
  4260. for (i = 0; i < batch_size; i++)
  4261. handle_stripe(batch[i]);
  4262. cond_resched();
  4263. spin_lock_irq(&conf->device_lock);
  4264. for (i = 0; i < batch_size; i++)
  4265. __release_stripe(conf, batch[i]);
  4266. return batch_size;
  4267. }
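/* Design note: batching up to MAX_STRIPE_BATCH stripes lets raid5d
* drop conf->device_lock once per batch of handle_stripe() calls
* rather than once per stripe, reducing lock traffic on busy arrays;
* the batch size of 8 is a fixed heuristic, not a tunable.
*/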
  4268. /*
  4269. * This is our raid5 kernel thread.
  4270. *
  4271. * We scan the hash table for stripes which can be handled now.
  4272. * During the scan, completed stripes are saved for us by the interrupt
  4273. * handler, so that they will not have to wait for our next wakeup.
  4274. */
  4275. static void raid5d(struct md_thread *thread)
  4276. {
  4277. struct mddev *mddev = thread->mddev;
  4278. struct r5conf *conf = mddev->private;
  4279. int handled;
  4280. struct blk_plug plug;
  4281. pr_debug("+++ raid5d active\n");
  4282. md_check_recovery(mddev);
  4283. blk_start_plug(&plug);
  4284. handled = 0;
  4285. spin_lock_irq(&conf->device_lock);
  4286. while (1) {
  4287. struct bio *bio;
  4288. int batch_size;
4289. if (!list_empty(&conf->bitmap_list)) {
  4291. /* Now is a good time to flush some bitmap updates */
  4292. conf->seq_flush++;
  4293. spin_unlock_irq(&conf->device_lock);
  4294. bitmap_unplug(mddev->bitmap);
  4295. spin_lock_irq(&conf->device_lock);
  4296. conf->seq_write = conf->seq_flush;
  4297. activate_bit_delay(conf);
  4298. }
  4299. raid5_activate_delayed(conf);
  4300. while ((bio = remove_bio_from_retry(conf))) {
  4301. int ok;
  4302. spin_unlock_irq(&conf->device_lock);
  4303. ok = retry_aligned_read(conf, bio);
  4304. spin_lock_irq(&conf->device_lock);
  4305. if (!ok)
  4306. break;
  4307. handled++;
  4308. }
  4309. batch_size = handle_active_stripes(conf);
  4310. if (!batch_size)
  4311. break;
  4312. handled += batch_size;
  4313. if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
  4314. spin_unlock_irq(&conf->device_lock);
  4315. md_check_recovery(mddev);
  4316. spin_lock_irq(&conf->device_lock);
  4317. }
  4318. }
  4319. pr_debug("%d stripes handled\n", handled);
  4320. spin_unlock_irq(&conf->device_lock);
  4321. async_tx_issue_pending_all();
  4322. blk_finish_plug(&plug);
  4323. pr_debug("--- raid5d inactive\n");
  4324. }
  4325. static ssize_t
  4326. raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
  4327. {
  4328. struct r5conf *conf = mddev->private;
  4329. if (conf)
  4330. return sprintf(page, "%d\n", conf->max_nr_stripes);
  4331. else
  4332. return 0;
  4333. }
  4334. int
  4335. raid5_set_cache_size(struct mddev *mddev, int size)
  4336. {
  4337. struct r5conf *conf = mddev->private;
  4338. int err;
  4339. if (size <= 16 || size > 32768)
  4340. return -EINVAL;
  4341. while (size < conf->max_nr_stripes) {
  4342. if (drop_one_stripe(conf))
  4343. conf->max_nr_stripes--;
  4344. else
  4345. break;
  4346. }
  4347. err = md_allow_write(mddev);
  4348. if (err)
  4349. return err;
  4350. while (size > conf->max_nr_stripes) {
  4351. if (grow_one_stripe(conf))
  4352. conf->max_nr_stripes++;
  4353. else break;
  4354. }
  4355. return 0;
  4356. }
  4357. EXPORT_SYMBOL(raid5_set_cache_size);
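/* Usage sketch (assuming a typical md array exposed at
* /sys/block/md0):
*
* # read the current stripe cache size
* cat /sys/block/md0/md/stripe_cache_size
* # grow it to 4096 entries
* echo 4096 > /sys/block/md0/md/stripe_cache_size
*
* Per the 'memory' estimate in setup_conf(), each stripe_head carries
* one page per device, so 4096 stripes on a 4-disk array pin roughly
* 4096 * 4 * 4K = 64MB of pages.
*/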
  4358. static ssize_t
  4359. raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
  4360. {
  4361. struct r5conf *conf = mddev->private;
  4362. unsigned long new;
  4363. int err;
  4364. if (len >= PAGE_SIZE)
  4365. return -EINVAL;
  4366. if (!conf)
  4367. return -ENODEV;
  4368. if (strict_strtoul(page, 10, &new))
  4369. return -EINVAL;
  4370. err = raid5_set_cache_size(mddev, new);
  4371. if (err)
  4372. return err;
  4373. return len;
  4374. }
  4375. static struct md_sysfs_entry
  4376. raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
  4377. raid5_show_stripe_cache_size,
  4378. raid5_store_stripe_cache_size);
  4379. static ssize_t
  4380. raid5_show_preread_threshold(struct mddev *mddev, char *page)
  4381. {
  4382. struct r5conf *conf = mddev->private;
  4383. if (conf)
  4384. return sprintf(page, "%d\n", conf->bypass_threshold);
  4385. else
  4386. return 0;
  4387. }
  4388. static ssize_t
  4389. raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
  4390. {
  4391. struct r5conf *conf = mddev->private;
  4392. unsigned long new;
  4393. if (len >= PAGE_SIZE)
  4394. return -EINVAL;
  4395. if (!conf)
  4396. return -ENODEV;
  4397. if (strict_strtoul(page, 10, &new))
  4398. return -EINVAL;
  4399. if (new > conf->max_nr_stripes)
  4400. return -EINVAL;
  4401. conf->bypass_threshold = new;
  4402. return len;
  4403. }
  4404. static struct md_sysfs_entry
  4405. raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
  4406. S_IRUGO | S_IWUSR,
  4407. raid5_show_preread_threshold,
  4408. raid5_store_preread_threshold);
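/* Usage sketch (assuming an array at /sys/block/md0): the threshold is
* how many times writes that need no pre-read may bypass stripes that
* are waiting for pre-reads; values above stripe_cache_size are
* rejected with -EINVAL by the store method above.
*
* echo 2 > /sys/block/md0/md/preread_bypass_threshold
*/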
  4409. static ssize_t
  4410. stripe_cache_active_show(struct mddev *mddev, char *page)
  4411. {
  4412. struct r5conf *conf = mddev->private;
  4413. if (conf)
  4414. return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
  4415. else
  4416. return 0;
  4417. }
  4418. static struct md_sysfs_entry
  4419. raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
  4420. static struct attribute *raid5_attrs[] = {
  4421. &raid5_stripecache_size.attr,
  4422. &raid5_stripecache_active.attr,
  4423. &raid5_preread_bypass_threshold.attr,
  4424. NULL,
  4425. };
  4426. static struct attribute_group raid5_attrs_group = {
  4427. .name = NULL,
  4428. .attrs = raid5_attrs,
  4429. };
  4430. static sector_t
  4431. raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
  4432. {
  4433. struct r5conf *conf = mddev->private;
  4434. if (!sectors)
  4435. sectors = mddev->dev_sectors;
  4436. if (!raid_disks)
  4437. /* size is defined by the smallest of previous and new size */
  4438. raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
  4439. sectors &= ~((sector_t)mddev->chunk_sectors - 1);
  4440. sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
  4441. return sectors * (raid_disks - conf->max_degraded);
  4442. }
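/* Worked example (illustrative numbers): a 4-disk raid5 with 512K
* chunks (1024 sectors) and dev_sectors = 10000. Rounding down to a
* chunk multiple gives 9216 sectors per device, and with
* max_degraded = 1 the array exports 9216 * (4 - 1) = 27648 sectors.
*/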
  4443. static void raid5_free_percpu(struct r5conf *conf)
  4444. {
  4445. struct raid5_percpu *percpu;
  4446. unsigned long cpu;
  4447. if (!conf->percpu)
  4448. return;
  4449. get_online_cpus();
  4450. for_each_possible_cpu(cpu) {
  4451. percpu = per_cpu_ptr(conf->percpu, cpu);
  4452. safe_put_page(percpu->spare_page);
  4453. kfree(percpu->scribble);
  4454. }
  4455. #ifdef CONFIG_HOTPLUG_CPU
  4456. unregister_cpu_notifier(&conf->cpu_notify);
  4457. #endif
  4458. put_online_cpus();
  4459. free_percpu(conf->percpu);
  4460. }
  4461. static void free_conf(struct r5conf *conf)
  4462. {
  4463. shrink_stripes(conf);
  4464. raid5_free_percpu(conf);
  4465. kfree(conf->disks);
  4466. kfree(conf->stripe_hashtbl);
  4467. kfree(conf);
  4468. }
  4469. #ifdef CONFIG_HOTPLUG_CPU
  4470. static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
  4471. void *hcpu)
  4472. {
  4473. struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
  4474. long cpu = (long)hcpu;
  4475. struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
  4476. switch (action) {
  4477. case CPU_UP_PREPARE:
  4478. case CPU_UP_PREPARE_FROZEN:
  4479. if (conf->level == 6 && !percpu->spare_page)
  4480. percpu->spare_page = alloc_page(GFP_KERNEL);
  4481. if (!percpu->scribble)
  4482. percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
  4483. if (!percpu->scribble ||
  4484. (conf->level == 6 && !percpu->spare_page)) {
  4485. safe_put_page(percpu->spare_page);
  4486. kfree(percpu->scribble);
  4487. pr_err("%s: failed memory allocation for cpu%ld\n",
  4488. __func__, cpu);
  4489. return notifier_from_errno(-ENOMEM);
  4490. }
  4491. break;
  4492. case CPU_DEAD:
  4493. case CPU_DEAD_FROZEN:
  4494. safe_put_page(percpu->spare_page);
  4495. kfree(percpu->scribble);
  4496. percpu->spare_page = NULL;
  4497. percpu->scribble = NULL;
  4498. break;
  4499. default:
  4500. break;
  4501. }
  4502. return NOTIFY_OK;
  4503. }
  4504. #endif
  4505. static int raid5_alloc_percpu(struct r5conf *conf)
  4506. {
  4507. unsigned long cpu;
  4508. struct page *spare_page;
  4509. struct raid5_percpu __percpu *allcpus;
  4510. void *scribble;
  4511. int err;
  4512. allcpus = alloc_percpu(struct raid5_percpu);
  4513. if (!allcpus)
  4514. return -ENOMEM;
  4515. conf->percpu = allcpus;
  4516. get_online_cpus();
  4517. err = 0;
  4518. for_each_present_cpu(cpu) {
  4519. if (conf->level == 6) {
  4520. spare_page = alloc_page(GFP_KERNEL);
  4521. if (!spare_page) {
  4522. err = -ENOMEM;
  4523. break;
  4524. }
  4525. per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
  4526. }
  4527. scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
  4528. if (!scribble) {
  4529. err = -ENOMEM;
  4530. break;
  4531. }
  4532. per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
  4533. }
  4534. #ifdef CONFIG_HOTPLUG_CPU
  4535. conf->cpu_notify.notifier_call = raid456_cpu_notify;
  4536. conf->cpu_notify.priority = 0;
  4537. if (err == 0)
  4538. err = register_cpu_notifier(&conf->cpu_notify);
  4539. #endif
  4540. put_online_cpus();
  4541. return err;
  4542. }
  4543. static struct r5conf *setup_conf(struct mddev *mddev)
  4544. {
  4545. struct r5conf *conf;
  4546. int raid_disk, memory, max_disks;
  4547. struct md_rdev *rdev;
  4548. struct disk_info *disk;
  4549. char pers_name[6];
  4550. if (mddev->new_level != 5
  4551. && mddev->new_level != 4
  4552. && mddev->new_level != 6) {
  4553. printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
  4554. mdname(mddev), mddev->new_level);
  4555. return ERR_PTR(-EIO);
  4556. }
  4557. if ((mddev->new_level == 5
  4558. && !algorithm_valid_raid5(mddev->new_layout)) ||
  4559. (mddev->new_level == 6
  4560. && !algorithm_valid_raid6(mddev->new_layout))) {
  4561. printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
  4562. mdname(mddev), mddev->new_layout);
  4563. return ERR_PTR(-EIO);
  4564. }
  4565. if (mddev->new_level == 6 && mddev->raid_disks < 4) {
  4566. printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
  4567. mdname(mddev), mddev->raid_disks);
  4568. return ERR_PTR(-EINVAL);
  4569. }
  4570. if (!mddev->new_chunk_sectors ||
  4571. (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
  4572. !is_power_of_2(mddev->new_chunk_sectors)) {
  4573. printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
  4574. mdname(mddev), mddev->new_chunk_sectors << 9);
  4575. return ERR_PTR(-EINVAL);
  4576. }
  4577. conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
  4578. if (conf == NULL)
  4579. goto abort;
  4580. spin_lock_init(&conf->device_lock);
  4581. init_waitqueue_head(&conf->wait_for_stripe);
  4582. init_waitqueue_head(&conf->wait_for_overlap);
  4583. INIT_LIST_HEAD(&conf->handle_list);
  4584. INIT_LIST_HEAD(&conf->hold_list);
  4585. INIT_LIST_HEAD(&conf->delayed_list);
  4586. INIT_LIST_HEAD(&conf->bitmap_list);
  4587. INIT_LIST_HEAD(&conf->inactive_list);
  4588. atomic_set(&conf->active_stripes, 0);
  4589. atomic_set(&conf->preread_active_stripes, 0);
  4590. atomic_set(&conf->active_aligned_reads, 0);
  4591. conf->bypass_threshold = BYPASS_THRESHOLD;
  4592. conf->recovery_disabled = mddev->recovery_disabled - 1;
  4593. conf->raid_disks = mddev->raid_disks;
  4594. if (mddev->reshape_position == MaxSector)
  4595. conf->previous_raid_disks = mddev->raid_disks;
  4596. else
  4597. conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
  4598. max_disks = max(conf->raid_disks, conf->previous_raid_disks);
  4599. conf->scribble_len = scribble_len(max_disks);
  4600. conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
  4601. GFP_KERNEL);
  4602. if (!conf->disks)
  4603. goto abort;
  4604. conf->mddev = mddev;
  4605. if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
  4606. goto abort;
  4607. conf->level = mddev->new_level;
  4608. if (raid5_alloc_percpu(conf) != 0)
  4609. goto abort;
  4610. pr_debug("raid456: run(%s) called.\n", mdname(mddev));
  4611. rdev_for_each(rdev, mddev) {
  4612. raid_disk = rdev->raid_disk;
  4613. if (raid_disk >= max_disks
  4614. || raid_disk < 0)
  4615. continue;
  4616. disk = conf->disks + raid_disk;
  4617. if (test_bit(Replacement, &rdev->flags)) {
  4618. if (disk->replacement)
  4619. goto abort;
  4620. disk->replacement = rdev;
  4621. } else {
  4622. if (disk->rdev)
  4623. goto abort;
  4624. disk->rdev = rdev;
  4625. }
  4626. if (test_bit(In_sync, &rdev->flags)) {
  4627. char b[BDEVNAME_SIZE];
  4628. printk(KERN_INFO "md/raid:%s: device %s operational as raid"
  4629. " disk %d\n",
  4630. mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
  4631. } else if (rdev->saved_raid_disk != raid_disk)
  4632. /* Cannot rely on bitmap to complete recovery */
  4633. conf->fullsync = 1;
  4634. }
  4635. conf->chunk_sectors = mddev->new_chunk_sectors;
  4636. conf->level = mddev->new_level;
  4637. if (conf->level == 6)
  4638. conf->max_degraded = 2;
  4639. else
  4640. conf->max_degraded = 1;
  4641. conf->algorithm = mddev->new_layout;
  4642. conf->max_nr_stripes = NR_STRIPES;
  4643. conf->reshape_progress = mddev->reshape_position;
  4644. if (conf->reshape_progress != MaxSector) {
  4645. conf->prev_chunk_sectors = mddev->chunk_sectors;
  4646. conf->prev_algo = mddev->layout;
  4647. }
  4648. memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
  4649. max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
  4650. if (grow_stripes(conf, conf->max_nr_stripes)) {
  4651. printk(KERN_ERR
  4652. "md/raid:%s: couldn't allocate %dkB for buffers\n",
  4653. mdname(mddev), memory);
  4654. goto abort;
  4655. } else
  4656. printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
  4657. mdname(mddev), memory);
  4658. sprintf(pers_name, "raid%d", mddev->new_level);
  4659. conf->thread = md_register_thread(raid5d, mddev, pers_name);
  4660. if (!conf->thread) {
  4661. printk(KERN_ERR
  4662. "md/raid:%s: couldn't allocate thread.\n",
  4663. mdname(mddev));
  4664. goto abort;
  4665. }
  4666. return conf;
  4667. abort:
  4668. if (conf) {
  4669. free_conf(conf);
  4670. return ERR_PTR(-EIO);
  4671. } else
  4672. return ERR_PTR(-ENOMEM);
  4673. }
  4674. static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
  4675. {
  4676. switch (algo) {
  4677. case ALGORITHM_PARITY_0:
  4678. if (raid_disk < max_degraded)
  4679. return 1;
  4680. break;
  4681. case ALGORITHM_PARITY_N:
  4682. if (raid_disk >= raid_disks - max_degraded)
  4683. return 1;
  4684. break;
  4685. case ALGORITHM_PARITY_0_6:
  4686. if (raid_disk == 0 ||
  4687. raid_disk == raid_disks - 1)
  4688. return 1;
  4689. break;
  4690. case ALGORITHM_LEFT_ASYMMETRIC_6:
  4691. case ALGORITHM_RIGHT_ASYMMETRIC_6:
  4692. case ALGORITHM_LEFT_SYMMETRIC_6:
  4693. case ALGORITHM_RIGHT_SYMMETRIC_6:
  4694. if (raid_disk == raid_disks - 1)
  4695. return 1;
  4696. }
  4697. return 0;
  4698. }
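/* Example (illustrative): on a 5-disk raid5 using ALGORITHM_PARITY_0,
* only_parity(0, ALGORITHM_PARITY_0, 5, 1) returns 1 because disk 0
* holds nothing but parity, so rebuilding it cannot lose data even on
* a dirty array; for any data disk it returns 0.
*/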
  4699. static int run(struct mddev *mddev)
  4700. {
  4701. struct r5conf *conf;
  4702. int working_disks = 0;
  4703. int dirty_parity_disks = 0;
  4704. struct md_rdev *rdev;
  4705. sector_t reshape_offset = 0;
  4706. int i;
  4707. long long min_offset_diff = 0;
  4708. int first = 1;
  4709. if (mddev->recovery_cp != MaxSector)
  4710. printk(KERN_NOTICE "md/raid:%s: not clean"
  4711. " -- starting background reconstruction\n",
  4712. mdname(mddev));
  4713. rdev_for_each(rdev, mddev) {
  4714. long long diff;
  4715. if (rdev->raid_disk < 0)
  4716. continue;
  4717. diff = (rdev->new_data_offset - rdev->data_offset);
  4718. if (first) {
  4719. min_offset_diff = diff;
  4720. first = 0;
  4721. } else if (mddev->reshape_backwards &&
  4722. diff < min_offset_diff)
  4723. min_offset_diff = diff;
  4724. else if (!mddev->reshape_backwards &&
  4725. diff > min_offset_diff)
  4726. min_offset_diff = diff;
  4727. }
  4728. if (mddev->reshape_position != MaxSector) {
  4729. /* Check that we can continue the reshape.
  4730. * Difficulties arise if the stripe we would write to
  4731. * next is at or after the stripe we would read from next.
  4732. * For a reshape that changes the number of devices, this
  4733. * is only possible for a very short time, and mdadm makes
4734. * sure that time appears to have passed before assembling
  4735. * the array. So we fail if that time hasn't passed.
  4736. * For a reshape that keeps the number of devices the same
4737. * mdadm must be monitoring the reshape and keeping the
  4738. * critical areas read-only and backed up. It will start
  4739. * the array in read-only mode, so we check for that.
  4740. */
  4741. sector_t here_new, here_old;
  4742. int old_disks;
  4743. int max_degraded = (mddev->level == 6 ? 2 : 1);
  4744. if (mddev->new_level != mddev->level) {
  4745. printk(KERN_ERR "md/raid:%s: unsupported reshape "
  4746. "required - aborting.\n",
  4747. mdname(mddev));
  4748. return -EINVAL;
  4749. }
  4750. old_disks = mddev->raid_disks - mddev->delta_disks;
  4751. /* reshape_position must be on a new-stripe boundary, and one
  4752. * further up in new geometry must map after here in old
  4753. * geometry.
  4754. */
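/* For example (illustrative numbers): growing to 6 disks with
* max_degraded = 1 and a 64K chunk (128 sectors), one full new-layout
* stripe covers 128 * 5 = 640 sectors, so reshape_position must be a
* multiple of 640 for the sector_div() below to leave no remainder.
*/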
  4755. here_new = mddev->reshape_position;
  4756. if (sector_div(here_new, mddev->new_chunk_sectors *
  4757. (mddev->raid_disks - max_degraded))) {
  4758. printk(KERN_ERR "md/raid:%s: reshape_position not "
  4759. "on a stripe boundary\n", mdname(mddev));
  4760. return -EINVAL;
  4761. }
  4762. reshape_offset = here_new * mddev->new_chunk_sectors;
  4763. /* here_new is the stripe we will write to */
  4764. here_old = mddev->reshape_position;
  4765. sector_div(here_old, mddev->chunk_sectors *
  4766. (old_disks-max_degraded));
  4767. /* here_old is the first stripe that we might need to read
  4768. * from */
  4769. if (mddev->delta_disks == 0) {
  4770. if ((here_new * mddev->new_chunk_sectors !=
  4771. here_old * mddev->chunk_sectors)) {
  4772. printk(KERN_ERR "md/raid:%s: reshape position is"
  4773. " confused - aborting\n", mdname(mddev));
  4774. return -EINVAL;
  4775. }
  4776. /* We cannot be sure it is safe to start an in-place
  4777. * reshape. It is only safe if user-space is monitoring
  4778. * and taking constant backups.
  4779. * mdadm always starts a situation like this in
  4780. * readonly mode so it can take control before
  4781. * allowing any writes. So just check for that.
  4782. */
  4783. if (abs(min_offset_diff) >= mddev->chunk_sectors &&
  4784. abs(min_offset_diff) >= mddev->new_chunk_sectors)
  4785. /* not really in-place - so OK */;
  4786. else if (mddev->ro == 0) {
  4787. printk(KERN_ERR "md/raid:%s: in-place reshape "
  4788. "must be started in read-only mode "
  4789. "- aborting\n",
  4790. mdname(mddev));
  4791. return -EINVAL;
  4792. }
  4793. } else if (mddev->reshape_backwards
  4794. ? (here_new * mddev->new_chunk_sectors + min_offset_diff <=
  4795. here_old * mddev->chunk_sectors)
  4796. : (here_new * mddev->new_chunk_sectors >=
  4797. here_old * mddev->chunk_sectors + (-min_offset_diff))) {
  4798. /* Reading from the same stripe as writing to - bad */
  4799. printk(KERN_ERR "md/raid:%s: reshape_position too early for "
  4800. "auto-recovery - aborting.\n",
  4801. mdname(mddev));
  4802. return -EINVAL;
  4803. }
  4804. printk(KERN_INFO "md/raid:%s: reshape will continue\n",
  4805. mdname(mddev));
  4806. /* OK, we should be able to continue; */
  4807. } else {
  4808. BUG_ON(mddev->level != mddev->new_level);
  4809. BUG_ON(mddev->layout != mddev->new_layout);
  4810. BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
  4811. BUG_ON(mddev->delta_disks != 0);
  4812. }
  4813. if (mddev->private == NULL)
  4814. conf = setup_conf(mddev);
  4815. else
  4816. conf = mddev->private;
  4817. if (IS_ERR(conf))
  4818. return PTR_ERR(conf);
  4819. conf->min_offset_diff = min_offset_diff;
  4820. mddev->thread = conf->thread;
  4821. conf->thread = NULL;
  4822. mddev->private = conf;
  4823. for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
  4824. i++) {
  4825. rdev = conf->disks[i].rdev;
  4826. if (!rdev && conf->disks[i].replacement) {
  4827. /* The replacement is all we have yet */
  4828. rdev = conf->disks[i].replacement;
  4829. conf->disks[i].replacement = NULL;
  4830. clear_bit(Replacement, &rdev->flags);
  4831. conf->disks[i].rdev = rdev;
  4832. }
  4833. if (!rdev)
  4834. continue;
  4835. if (conf->disks[i].replacement &&
  4836. conf->reshape_progress != MaxSector) {
  4837. /* replacements and reshape simply do not mix. */
  4838. printk(KERN_ERR "md: cannot handle concurrent "
  4839. "replacement and reshape.\n");
  4840. goto abort;
  4841. }
  4842. if (test_bit(In_sync, &rdev->flags)) {
  4843. working_disks++;
  4844. continue;
  4845. }
4846. /* This disk is not fully in-sync. However if it
4847. * just stored parity (beyond the recovery_offset),
4848. * then we don't need to be concerned about the
  4849. * array being dirty.
  4850. * When reshape goes 'backwards', we never have
  4851. * partially completed devices, so we only need
  4852. * to worry about reshape going forwards.
  4853. */
  4854. /* Hack because v0.91 doesn't store recovery_offset properly. */
  4855. if (mddev->major_version == 0 &&
  4856. mddev->minor_version > 90)
  4857. rdev->recovery_offset = reshape_offset;
  4858. if (rdev->recovery_offset < reshape_offset) {
  4859. /* We need to check old and new layout */
  4860. if (!only_parity(rdev->raid_disk,
  4861. conf->algorithm,
  4862. conf->raid_disks,
  4863. conf->max_degraded))
  4864. continue;
  4865. }
  4866. if (!only_parity(rdev->raid_disk,
  4867. conf->prev_algo,
  4868. conf->previous_raid_disks,
  4869. conf->max_degraded))
  4870. continue;
  4871. dirty_parity_disks++;
  4872. }
  4873. /*
  4874. * 0 for a fully functional array, 1 or 2 for a degraded array.
  4875. */
  4876. mddev->degraded = calc_degraded(conf);
  4877. if (has_failed(conf)) {
  4878. printk(KERN_ERR "md/raid:%s: not enough operational devices"
  4879. " (%d/%d failed)\n",
  4880. mdname(mddev), mddev->degraded, conf->raid_disks);
  4881. goto abort;
  4882. }
  4883. /* device size must be a multiple of chunk size */
  4884. mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
  4885. mddev->resync_max_sectors = mddev->dev_sectors;
  4886. if (mddev->degraded > dirty_parity_disks &&
  4887. mddev->recovery_cp != MaxSector) {
  4888. if (mddev->ok_start_degraded)
  4889. printk(KERN_WARNING
  4890. "md/raid:%s: starting dirty degraded array"
  4891. " - data corruption possible.\n",
  4892. mdname(mddev));
  4893. else {
  4894. printk(KERN_ERR
  4895. "md/raid:%s: cannot start dirty degraded array.\n",
  4896. mdname(mddev));
  4897. goto abort;
  4898. }
  4899. }
  4900. if (mddev->degraded == 0)
  4901. printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
  4902. " devices, algorithm %d\n", mdname(mddev), conf->level,
  4903. mddev->raid_disks-mddev->degraded, mddev->raid_disks,
  4904. mddev->new_layout);
  4905. else
  4906. printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
  4907. " out of %d devices, algorithm %d\n",
  4908. mdname(mddev), conf->level,
  4909. mddev->raid_disks - mddev->degraded,
  4910. mddev->raid_disks, mddev->new_layout);
  4911. print_raid5_conf(conf);
  4912. if (conf->reshape_progress != MaxSector) {
  4913. conf->reshape_safe = conf->reshape_progress;
  4914. atomic_set(&conf->reshape_stripes, 0);
  4915. clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  4916. clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
  4917. set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
  4918. set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  4919. mddev->sync_thread = md_register_thread(md_do_sync, mddev,
  4920. "reshape");
  4921. }
  4922. /* Ok, everything is just fine now */
  4923. if (mddev->to_remove == &raid5_attrs_group)
  4924. mddev->to_remove = NULL;
  4925. else if (mddev->kobj.sd &&
  4926. sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
  4927. printk(KERN_WARNING
  4928. "raid5: failed to create sysfs attributes for %s\n",
  4929. mdname(mddev));
  4930. md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
  4931. if (mddev->queue) {
  4932. int chunk_size;
  4933. bool discard_supported = true;
  4934. /* read-ahead size must cover two whole stripes, which
4935. * is 2 * (number of data disks) * chunksize, where the data
4936. * disks are the raid devices minus the parity devices
  4937. */
  4938. int data_disks = conf->previous_raid_disks - conf->max_degraded;
  4939. int stripe = data_disks *
  4940. ((mddev->chunk_sectors << 9) / PAGE_SIZE);
  4941. if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
  4942. mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
  4943. blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
  4944. mddev->queue->backing_dev_info.congested_data = mddev;
  4945. mddev->queue->backing_dev_info.congested_fn = raid5_congested;
  4946. chunk_size = mddev->chunk_sectors << 9;
  4947. blk_queue_io_min(mddev->queue, chunk_size);
  4948. blk_queue_io_opt(mddev->queue, chunk_size *
  4949. (conf->raid_disks - conf->max_degraded));
  4950. /*
  4951. * We can only discard a whole stripe. It doesn't make sense to
  4952. * discard data disk but write parity disk
  4953. */
  4954. stripe = stripe * PAGE_SIZE;
  4955. mddev->queue->limits.discard_alignment = stripe;
  4956. mddev->queue->limits.discard_granularity = stripe;
  4957. /*
  4958. * unaligned part of discard request will be ignored, so can't
4959. * guarantee discard_zeroes_data
  4960. */
  4961. mddev->queue->limits.discard_zeroes_data = 0;
  4962. rdev_for_each(rdev, mddev) {
  4963. disk_stack_limits(mddev->gendisk, rdev->bdev,
  4964. rdev->data_offset << 9);
  4965. disk_stack_limits(mddev->gendisk, rdev->bdev,
  4966. rdev->new_data_offset << 9);
  4967. /*
  4968. * discard_zeroes_data is required, otherwise data
  4969. * could be lost. Consider a scenario: discard a stripe
  4970. * (the stripe could be inconsistent if
  4971. * discard_zeroes_data is 0); write one disk of the
  4972. * stripe (the stripe could be inconsistent again
  4973. * depending on which disks are used to calculate
  4974. * parity); the disk is broken; The stripe data of this
  4975. * disk is lost.
  4976. */
  4977. if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) ||
  4978. !bdev_get_queue(rdev->bdev)->
  4979. limits.discard_zeroes_data)
  4980. discard_supported = false;
  4981. }
  4982. if (discard_supported &&
  4983. mddev->queue->limits.max_discard_sectors >= stripe &&
  4984. mddev->queue->limits.discard_granularity >= stripe)
  4985. queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
  4986. mddev->queue);
  4987. else
  4988. queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
  4989. mddev->queue);
  4990. }
  4991. return 0;
  4992. abort:
  4993. md_unregister_thread(&mddev->thread);
  4994. print_raid5_conf(conf);
  4995. free_conf(conf);
  4996. mddev->private = NULL;
  4997. printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
  4998. return -EIO;
  4999. }
  5000. static int stop(struct mddev *mddev)
  5001. {
  5002. struct r5conf *conf = mddev->private;
  5003. md_unregister_thread(&mddev->thread);
  5004. if (mddev->queue)
  5005. mddev->queue->backing_dev_info.congested_fn = NULL;
  5006. free_conf(conf);
  5007. mddev->private = NULL;
  5008. mddev->to_remove = &raid5_attrs_group;
  5009. return 0;
  5010. }
  5011. static void status(struct seq_file *seq, struct mddev *mddev)
  5012. {
  5013. struct r5conf *conf = mddev->private;
  5014. int i;
  5015. seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
  5016. mddev->chunk_sectors / 2, mddev->layout);
  5017. seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
  5018. for (i = 0; i < conf->raid_disks; i++)
  5019. seq_printf (seq, "%s",
  5020. conf->disks[i].rdev &&
  5021. test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
  5022. seq_printf (seq, "]");
  5023. }
  5024. static void print_raid5_conf (struct r5conf *conf)
  5025. {
  5026. int i;
  5027. struct disk_info *tmp;
  5028. printk(KERN_DEBUG "RAID conf printout:\n");
  5029. if (!conf) {
  5030. printk("(conf==NULL)\n");
  5031. return;
  5032. }
  5033. printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
  5034. conf->raid_disks,
  5035. conf->raid_disks - conf->mddev->degraded);
  5036. for (i = 0; i < conf->raid_disks; i++) {
  5037. char b[BDEVNAME_SIZE];
  5038. tmp = conf->disks + i;
  5039. if (tmp->rdev)
  5040. printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
  5041. i, !test_bit(Faulty, &tmp->rdev->flags),
  5042. bdevname(tmp->rdev->bdev, b));
  5043. }
  5044. }
  5045. static int raid5_spare_active(struct mddev *mddev)
  5046. {
  5047. int i;
  5048. struct r5conf *conf = mddev->private;
  5049. struct disk_info *tmp;
  5050. int count = 0;
  5051. unsigned long flags;
  5052. for (i = 0; i < conf->raid_disks; i++) {
  5053. tmp = conf->disks + i;
  5054. if (tmp->replacement
  5055. && tmp->replacement->recovery_offset == MaxSector
  5056. && !test_bit(Faulty, &tmp->replacement->flags)
  5057. && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
  5058. /* Replacement has just become active. */
  5059. if (!tmp->rdev
  5060. || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
  5061. count++;
  5062. if (tmp->rdev) {
  5063. /* Replaced device not technically faulty,
  5064. * but we need to be sure it gets removed
  5065. * and never re-added.
  5066. */
  5067. set_bit(Faulty, &tmp->rdev->flags);
  5068. sysfs_notify_dirent_safe(
  5069. tmp->rdev->sysfs_state);
  5070. }
  5071. sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
  5072. } else if (tmp->rdev
  5073. && tmp->rdev->recovery_offset == MaxSector
  5074. && !test_bit(Faulty, &tmp->rdev->flags)
  5075. && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
  5076. count++;
  5077. sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
  5078. }
  5079. }
  5080. spin_lock_irqsave(&conf->device_lock, flags);
  5081. mddev->degraded = calc_degraded(conf);
  5082. spin_unlock_irqrestore(&conf->device_lock, flags);
  5083. print_raid5_conf(conf);
  5084. return count;
  5085. }
  5086. static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
  5087. {
  5088. struct r5conf *conf = mddev->private;
  5089. int err = 0;
  5090. int number = rdev->raid_disk;
  5091. struct md_rdev **rdevp;
  5092. struct disk_info *p = conf->disks + number;
  5093. print_raid5_conf(conf);
  5094. if (rdev == p->rdev)
  5095. rdevp = &p->rdev;
  5096. else if (rdev == p->replacement)
  5097. rdevp = &p->replacement;
  5098. else
  5099. return 0;
  5100. if (number >= conf->raid_disks &&
  5101. conf->reshape_progress == MaxSector)
  5102. clear_bit(In_sync, &rdev->flags);
  5103. if (test_bit(In_sync, &rdev->flags) ||
  5104. atomic_read(&rdev->nr_pending)) {
  5105. err = -EBUSY;
  5106. goto abort;
  5107. }
  5108. /* Only remove non-faulty devices if recovery
  5109. * isn't possible.
  5110. */
  5111. if (!test_bit(Faulty, &rdev->flags) &&
  5112. mddev->recovery_disabled != conf->recovery_disabled &&
  5113. !has_failed(conf) &&
  5114. (!p->replacement || p->replacement == rdev) &&
  5115. number < conf->raid_disks) {
  5116. err = -EBUSY;
  5117. goto abort;
  5118. }
  5119. *rdevp = NULL;
  5120. synchronize_rcu();
  5121. if (atomic_read(&rdev->nr_pending)) {
  5122. /* lost the race, try later */
  5123. err = -EBUSY;
  5124. *rdevp = rdev;
  5125. } else if (p->replacement) {
  5126. /* We must have just cleared 'rdev' */
  5127. p->rdev = p->replacement;
  5128. clear_bit(Replacement, &p->replacement->flags);
  5129. smp_mb(); /* Make sure other CPUs may see both as identical
  5130. * but will never see neither - if they are careful
  5131. */
  5132. p->replacement = NULL;
  5133. clear_bit(WantReplacement, &rdev->flags);
  5134. } else
5135. /* We might have just removed the Replacement as faulty -
  5136. * clear the bit just in case
  5137. */
  5138. clear_bit(WantReplacement, &rdev->flags);
  5139. abort:
  5140. print_raid5_conf(conf);
  5141. return err;
  5142. }
  5143. static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
  5144. {
  5145. struct r5conf *conf = mddev->private;
  5146. int err = -EEXIST;
  5147. int disk;
  5148. struct disk_info *p;
  5149. int first = 0;
  5150. int last = conf->raid_disks - 1;
  5151. if (mddev->recovery_disabled == conf->recovery_disabled)
  5152. return -EBUSY;
  5153. if (rdev->saved_raid_disk < 0 && has_failed(conf))
  5154. /* no point adding a device */
  5155. return -EINVAL;
  5156. if (rdev->raid_disk >= 0)
  5157. first = last = rdev->raid_disk;
  5158. /*
  5159. * find the disk ... but prefer rdev->saved_raid_disk
  5160. * if possible.
  5161. */
  5162. if (rdev->saved_raid_disk >= 0 &&
  5163. rdev->saved_raid_disk >= first &&
  5164. conf->disks[rdev->saved_raid_disk].rdev == NULL)
  5165. first = rdev->saved_raid_disk;
  5166. for (disk = first; disk <= last; disk++) {
  5167. p = conf->disks + disk;
  5168. if (p->rdev == NULL) {
  5169. clear_bit(In_sync, &rdev->flags);
  5170. rdev->raid_disk = disk;
  5171. err = 0;
  5172. if (rdev->saved_raid_disk != disk)
  5173. conf->fullsync = 1;
  5174. rcu_assign_pointer(p->rdev, rdev);
  5175. goto out;
  5176. }
  5177. }
  5178. for (disk = first; disk <= last; disk++) {
  5179. p = conf->disks + disk;
  5180. if (test_bit(WantReplacement, &p->rdev->flags) &&
  5181. p->replacement == NULL) {
  5182. clear_bit(In_sync, &rdev->flags);
  5183. set_bit(Replacement, &rdev->flags);
  5184. rdev->raid_disk = disk;
  5185. err = 0;
  5186. conf->fullsync = 1;
  5187. rcu_assign_pointer(p->replacement, rdev);
  5188. break;
  5189. }
  5190. }
  5191. out:
  5192. print_raid5_conf(conf);
  5193. return err;
  5194. }
  5195. static int raid5_resize(struct mddev *mddev, sector_t sectors)
  5196. {
  5197. /* no resync is happening, and there is enough space
  5198. * on all devices, so we can resize.
  5199. * We need to make sure resync covers any new space.
  5200. * If the array is shrinking we should possibly wait until
  5201. * any io in the removed space completes, but it hardly seems
  5202. * worth it.
  5203. */
  5204. sector_t newsize;
  5205. sectors &= ~((sector_t)mddev->chunk_sectors - 1);
  5206. newsize = raid5_size(mddev, sectors, mddev->raid_disks);
  5207. if (mddev->external_size &&
  5208. mddev->array_sectors > newsize)
  5209. return -EINVAL;
  5210. if (mddev->bitmap) {
  5211. int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
  5212. if (ret)
  5213. return ret;
  5214. }
  5215. md_set_array_sectors(mddev, newsize);
  5216. set_capacity(mddev->gendisk, mddev->array_sectors);
  5217. revalidate_disk(mddev->gendisk);
  5218. if (sectors > mddev->dev_sectors &&
  5219. mddev->recovery_cp > mddev->dev_sectors) {
  5220. mddev->recovery_cp = mddev->dev_sectors;
  5221. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  5222. }
  5223. mddev->dev_sectors = sectors;
  5224. mddev->resync_max_sectors = sectors;
  5225. return 0;
  5226. }
  5227. static int check_stripe_cache(struct mddev *mddev)
  5228. {
  5229. /* Can only proceed if there are plenty of stripe_heads.
5230. * We need a minimum of one full stripe, and for sensible progress
  5231. * it is best to have about 4 times that.
  5232. * If we require 4 times, then the default 256 4K stripe_heads will
  5233. * allow for chunk sizes up to 256K, which is probably OK.
  5234. * If the chunk size is greater, user-space should request more
  5235. * stripe_heads first.
  5236. */
  5237. struct r5conf *conf = mddev->private;
  5238. if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
  5239. > conf->max_nr_stripes ||
  5240. ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
  5241. > conf->max_nr_stripes) {
  5242. printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
  5243. mdname(mddev),
  5244. ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
  5245. / STRIPE_SIZE)*4);
  5246. return 0;
  5247. }
  5248. return 1;
  5249. }
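/* Worked example (illustrative, assuming 4K STRIPE_SIZE): a 1M chunk
* is (2048 << 9) / 4096 = 256 stripe_heads, so four times that is
* 1024; with the default 256 the reshape is refused until the user
* raises stripe_cache_size to at least 1024. A 256K chunk needs
* exactly 256 and just fits the default.
*/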
  5250. static int check_reshape(struct mddev *mddev)
  5251. {
  5252. struct r5conf *conf = mddev->private;
  5253. if (mddev->delta_disks == 0 &&
  5254. mddev->new_layout == mddev->layout &&
  5255. mddev->new_chunk_sectors == mddev->chunk_sectors)
  5256. return 0; /* nothing to do */
  5257. if (has_failed(conf))
  5258. return -EINVAL;
  5259. if (mddev->delta_disks < 0) {
  5260. /* We might be able to shrink, but the devices must
  5261. * be made bigger first.
5262. * For raid6, 4 devices is the minimum.
5263. * Otherwise 2 devices is the minimum.
  5264. */
  5265. int min = 2;
  5266. if (mddev->level == 6)
  5267. min = 4;
  5268. if (mddev->raid_disks + mddev->delta_disks < min)
  5269. return -EINVAL;
  5270. }
  5271. if (!check_stripe_cache(mddev))
  5272. return -ENOSPC;
  5273. return resize_stripes(conf, (conf->previous_raid_disks
  5274. + mddev->delta_disks));
  5275. }
  5276. static int raid5_start_reshape(struct mddev *mddev)
  5277. {
  5278. struct r5conf *conf = mddev->private;
  5279. struct md_rdev *rdev;
  5280. int spares = 0;
  5281. unsigned long flags;
  5282. if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
  5283. return -EBUSY;
  5284. if (!check_stripe_cache(mddev))
  5285. return -ENOSPC;
  5286. if (has_failed(conf))
  5287. return -EINVAL;
  5288. rdev_for_each(rdev, mddev) {
  5289. if (!test_bit(In_sync, &rdev->flags)
  5290. && !test_bit(Faulty, &rdev->flags))
  5291. spares++;
  5292. }
  5293. if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
  5294. /* Not enough devices even to make a degraded array
  5295. * of that size
  5296. */
  5297. return -EINVAL;
  5298. /* Refuse to reduce size of the array. Any reductions in
  5299. * array size must be through explicit setting of array_size
  5300. * attribute.
  5301. */
  5302. if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
  5303. < mddev->array_sectors) {
  5304. printk(KERN_ERR "md/raid:%s: array size must be reduced "
  5305. "before number of disks\n", mdname(mddev));
  5306. return -EINVAL;
  5307. }
  5308. atomic_set(&conf->reshape_stripes, 0);
  5309. spin_lock_irq(&conf->device_lock);
  5310. conf->previous_raid_disks = conf->raid_disks;
  5311. conf->raid_disks += mddev->delta_disks;
  5312. conf->prev_chunk_sectors = conf->chunk_sectors;
  5313. conf->chunk_sectors = mddev->new_chunk_sectors;
  5314. conf->prev_algo = conf->algorithm;
  5315. conf->algorithm = mddev->new_layout;
  5316. conf->generation++;
  5317. /* Code that selects data_offset needs to see the generation update
  5318. * if reshape_progress has been set - so a memory barrier needed.
  5319. */
  5320. smp_mb();
  5321. if (mddev->reshape_backwards)
  5322. conf->reshape_progress = raid5_size(mddev, 0, 0);
  5323. else
  5324. conf->reshape_progress = 0;
  5325. conf->reshape_safe = conf->reshape_progress;
  5326. spin_unlock_irq(&conf->device_lock);
  5327. /* Add some new drives, as many as will fit.
  5328. * We know there are enough to make the newly sized array work.
  5329. * Don't add devices if we are reducing the number of
  5330. * devices in the array. This is because it is not possible
  5331. * to correctly record the "partially reconstructed" state of
  5332. * such devices during the reshape and confusion could result.
  5333. */
  5334. if (mddev->delta_disks >= 0) {
  5335. rdev_for_each(rdev, mddev)
  5336. if (rdev->raid_disk < 0 &&
  5337. !test_bit(Faulty, &rdev->flags)) {
  5338. if (raid5_add_disk(mddev, rdev) == 0) {
  5339. if (rdev->raid_disk
  5340. >= conf->previous_raid_disks)
  5341. set_bit(In_sync, &rdev->flags);
  5342. else
  5343. rdev->recovery_offset = 0;
  5344. if (sysfs_link_rdev(mddev, rdev))
  5345. /* Failure here is OK */;
  5346. }
  5347. } else if (rdev->raid_disk >= conf->previous_raid_disks
  5348. && !test_bit(Faulty, &rdev->flags)) {
  5349. /* This is a spare that was manually added */
  5350. set_bit(In_sync, &rdev->flags);
  5351. }
  5352. /* When a reshape changes the number of devices,
  5353. * ->degraded is measured against the larger of the
  5354. * pre and post number of devices.
  5355. */
  5356. spin_lock_irqsave(&conf->device_lock, flags);
  5357. mddev->degraded = calc_degraded(conf);
  5358. spin_unlock_irqrestore(&conf->device_lock, flags);
  5359. }
  5360. mddev->raid_disks = conf->raid_disks;
  5361. mddev->reshape_position = conf->reshape_progress;
  5362. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  5363. clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  5364. clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
  5365. set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
  5366. set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  5367. mddev->sync_thread = md_register_thread(md_do_sync, mddev,
  5368. "reshape");
  5369. if (!mddev->sync_thread) {
  5370. mddev->recovery = 0;
  5371. spin_lock_irq(&conf->device_lock);
  5372. mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
  5373. rdev_for_each(rdev, mddev)
  5374. rdev->new_data_offset = rdev->data_offset;
  5375. smp_wmb();
  5376. conf->reshape_progress = MaxSector;
  5377. mddev->reshape_position = MaxSector;
  5378. spin_unlock_irq(&conf->device_lock);
  5379. return -EAGAIN;
  5380. }
  5381. conf->reshape_checkpoint = jiffies;
  5382. md_wakeup_thread(mddev->sync_thread);
  5383. md_new_event(mddev);
  5384. return 0;
  5385. }
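/* Userspace entry sketch (assuming a 4-disk /dev/md0 with a spare
* already added): a reshape started with
* "mdadm --grow /dev/md0 --raid-devices=5" ends up here (the
* personality's start_reshape method); on success the "reshape" sync
* thread registered above drives reshape_request().
*/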
  5386. /* This is called from the reshape thread and should make any
  5387. * changes needed in 'conf'
  5388. */
  5389. static void end_reshape(struct r5conf *conf)
  5390. {
  5391. if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
  5392. struct md_rdev *rdev;
  5393. spin_lock_irq(&conf->device_lock);
  5394. conf->previous_raid_disks = conf->raid_disks;
  5395. rdev_for_each(rdev, conf->mddev)
  5396. rdev->data_offset = rdev->new_data_offset;
  5397. smp_wmb();
  5398. conf->reshape_progress = MaxSector;
  5399. spin_unlock_irq(&conf->device_lock);
  5400. wake_up(&conf->wait_for_overlap);
  5401. /* read-ahead size must cover two whole stripes, which is
5402. * 2 * (number of data disks) * chunksize
  5403. */
  5404. if (conf->mddev->queue) {
  5405. int data_disks = conf->raid_disks - conf->max_degraded;
  5406. int stripe = data_disks * ((conf->chunk_sectors << 9)
  5407. / PAGE_SIZE);
  5408. if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
  5409. conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
  5410. }
  5411. }
  5412. }

/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {

		if (mddev->delta_disks > 0) {
			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
			set_capacity(mddev->gendisk, mddev->array_sectors);
			revalidate_disk(mddev->gendisk);
		} else {
			int d;
			spin_lock_irq(&conf->device_lock);
			mddev->degraded = calc_degraded(conf);
			spin_unlock_irq(&conf->device_lock);
			for (d = conf->raid_disks;
			     d < conf->raid_disks - mddev->delta_disks;
			     d++) {
				struct md_rdev *rdev = conf->disks[d].rdev;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
				rdev = conf->disks[d].replacement;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
			}
		}
		mddev->layout = conf->algorithm;
		mddev->chunk_sectors = conf->chunk_sectors;
		mddev->reshape_position = MaxSector;
		mddev->delta_disks = 0;
		mddev->reshape_backwards = 0;
	}
}
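
/* Quiesce states, as used by the cases below: 1 stops all writes and
 * waits for active stripes to drain, 0 re-enables them, and 2 briefly
 * wakes a paused resync/reshape while the array is otherwise suspended.
 */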
static void raid5_quiesce(struct mddev *mddev, int state)
{
	struct r5conf *conf = mddev->private;

	switch (state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		/* '2' tells resync/reshape to pause so that all
		 * active stripes can drain
		 */
		conf->quiesce = 2;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0 &&
				    atomic_read(&conf->active_aligned_reads) == 0,
				    conf->device_lock, /* nothing */);
		conf->quiesce = 1;
		spin_unlock_irq(&conf->device_lock);
		/* allow reshape to continue */
		wake_up(&conf->wait_for_overlap);
		break;

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		wake_up(&conf->wait_for_overlap);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}

static void *raid45_takeover_raid0(struct mddev *mddev, int level)
{
	struct r0conf *raid0_conf = mddev->private;
	sector_t sectors;

	/* for raid0 takeover only one zone is supported */
	if (raid0_conf->nr_strip_zones > 1) {
		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	sectors = raid0_conf->strip_zone[0].zone_end;
	sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
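	/* zone_end is cumulative across the zone's members, so after the
	 * division 'sectors' is the per-device data size.  Usable capacity
	 * is preserved: the device added below carries only parity.
	 */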
	mddev->dev_sectors = sectors;
	mddev->new_level = level;
	mddev->new_layout = ALGORITHM_PARITY_N;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks += 1;
	mddev->delta_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	return setup_conf(mddev);
}

static void *raid5_takeover_raid1(struct mddev *mddev)
{
	int chunksect;

	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);

	/* Should check if there are write-behind devices? */

	chunksect = 64*2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect-1)))
		chunksect >>= 1;

	if ((chunksect<<9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);
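	/* Illustrative: an array of 262208 sectors fails the 128-sector
	 * test (262208 & 127 == 64) but passes at 64 sectors, so the
	 * takeover would use a 32KiB chunk.
	 */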
	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk_sectors = chunksect;

	return setup_conf(mddev);
}
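
/* Only the *_6 layouts can be taken over: they keep the Q block on the
 * last device, so dropping that device leaves a valid raid5 layout.
 */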
static void *raid5_takeover_raid6(struct mddev *mddev)
{
	int new_layout;

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
		break;
	case ALGORITHM_LEFT_SYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
		break;
	case ALGORITHM_PARITY_0_6:
		new_layout = ALGORITHM_PARITY_0;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
}

static int raid5_check_reshape(struct mddev *mddev)
{
	/* For a 2-drive array, the layout and chunk size can be changed
	 * immediately as no restriping is needed.
	 * For larger arrays we record the new value - after validation
	 * to be used by a reshape pass.
	 */
	struct r5conf *conf = mddev->private;
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE>>9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}
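	/* Illustrative: new_chunk is in sectors, so with 4KiB pages the
	 * minimum accepted chunk is PAGE_SIZE>>9 = 8 sectors (4KiB), and
	 * it must be a power of two that divides the array size.
	 */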
	/* They look valid */

	if (mddev->raid_disks == 2) {
		/* can make the change immediately */
		if (mddev->new_layout >= 0) {
			conf->algorithm = mddev->new_layout;
			mddev->layout = mddev->new_layout;
		}
		if (new_chunk > 0) {
			conf->chunk_sectors = new_chunk;
			mddev->chunk_sectors = new_chunk;
		}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return check_reshape(mddev);
}

static int raid6_check_reshape(struct mddev *mddev)
{
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE >> 9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */
	return check_reshape(mddev);
}

static void *raid5_takeover(struct mddev *mddev)
{
	/* raid5 can take over:
	 *  raid0 - if there is only one strip zone - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - provided it is a *_6 layout
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 5);
	if (mddev->level == 1)
		return raid5_takeover_raid1(mddev);
	if (mddev->level == 4) {
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	}
	if (mddev->level == 6)
		return raid5_takeover_raid6(mddev);

	return ERR_PTR(-EINVAL);
}

static void *raid4_takeover(struct mddev *mddev)
{
	/* raid4 can take over:
	 *  raid0 - if there is only one strip zone
	 *  raid5 - if layout is right
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 4);
	if (mddev->level == 5 &&
	    mddev->layout == ALGORITHM_PARITY_N) {
		mddev->new_layout = 0;
		mddev->new_level = 4;
		return setup_conf(mddev);
	}
	return ERR_PTR(-EINVAL);
}

static struct md_personality raid5_personality;

static void *raid6_takeover(struct mddev *mddev)
{
	/* Currently can only take over a raid5.  We map the
	 * personality to an equivalent raid6 personality
	 * with the Q block at the end.
	 */
	int new_layout;

	if (mddev->pers != &raid5_personality)
		return ERR_PTR(-EINVAL);
	if (mddev->degraded > 1)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks > 253)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks < 3)
		return ERR_PTR(-EINVAL);

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
		break;
	case ALGORITHM_LEFT_SYMMETRIC:
		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
		break;
	case ALGORITHM_PARITY_0:
		new_layout = ALGORITHM_PARITY_0_6;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
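	/* The Q device is appended (delta_disks = 1), so the new raid6
	 * presumably runs degraded until a rebuild fills in the Q blocks.
	 */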
	mddev->new_level = 6;
	mddev->new_layout = new_layout;
	mddev->delta_disks = 1;
	mddev->raid_disks += 1;
	return setup_conf(mddev);
}

static struct md_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid6_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid6_takeover,
};
static struct md_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid5_takeover,
};
static struct md_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid4_takeover,
};

static int __init raid5_init(void)
{
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");