/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in.  This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *    we plug the array and queue the stripe for later.
 * When an unplug happens, we increment seq_flush, thus closing the current
 * batch.
 * When we notice that seq_flush > seq_write, we write out all pending updates
 * to the bitmap, and advance seq_write to where seq_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */
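/*
 * Illustrative walk-through of the batch sequencing described above
 * (not part of the original source; the numbers are made up for clarity):
 *
 *   seq_write = 5, seq_flush = 5     no batch is currently awaiting flush
 *   add_stripe_bio() sets sh->bm_seq = seq_flush + 1 = 6
 *   unplug: seq_flush becomes 6      batch 6 is closed to new additions
 *   seq_flush (6) > seq_write (5)    pending bitmap updates are written out
 *   seq_write advances to 6          stripes with bm_seq <= 6 may now be written
 */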
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "md.h"
#include "raid5.h"
#include "raid0.h"
#include "bitmap.h"

/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
	int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
	return &conf->stripe_hashtbl[hash];
}
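/*
 * Worked example for the macros above (illustrative only, assuming a 4K
 * page and an 8-byte struct hlist_head, i.e. a 64-bit build):
 *
 *   STRIPE_SIZE    = 4096 bytes
 *   STRIPE_SHIFT   = 12 - 9 = 3
 *   STRIPE_SECTORS = 4096 >> 9 = 8 sectors per stripe unit
 *   NR_HASH        = 4096 / 8 = 512 buckets, HASH_MASK = 0x1ff
 *
 * A stripe at sector 123456 therefore hashes to
 *   (123456 >> 3) & 0x1ff = 15432 & 0x1ff = 72.
 */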
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device.
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
	int sectors = bio->bi_size >> 9;
	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
		return bio->bi_next;
	else
		return NULL;
}
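/*
 * Illustrative example of the check above (not from the original source),
 * with STRIPE_SECTORS == 8 and a stripe+device covering sectors 16..23:
 *
 *   bio A: bi_sector = 16, 8 sectors  -> ends at 24, which is not below
 *                                        16+8, so r5_next_bio() returns NULL
 *   bio B: bi_sector = 16, 4 sectors  -> ends at 20 < 24, so the walk may
 *                                        safely continue to B->bi_next
 */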
/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_processed_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return (atomic_read(segments) >> 16) & 0xffff;
}

static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return atomic_sub_return(1, segments) & 0xffff;
}

static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_inc(segments);
}

static inline void raid5_set_bi_processed_stripes(struct bio *bio,
	unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	int old, new;

	do {
		old = atomic_read(segments);
		new = (old & 0xffff) | (cnt << 16);
	} while (atomic_cmpxchg(segments, old, new) != old);
}

static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_set(segments, cnt);
}
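/*
 * Worked example of the packing used by the helpers above (illustrative
 * only): if bi_phys_segments holds 0x00030002, then
 *
 *   active stripes    = 0x00030002 & 0xffff         = 2
 *   processed stripes = (0x00030002 >> 16) & 0xffff = 3
 *
 * raid5_dec_bi_active_stripes() returns the new low half (here 1), so a
 * return value of 0 tells the caller the bio has no more active stripes.
 */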
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}

static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}
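/*
 * Illustrative mapping produced by raid6_d0()/raid6_idx_to_slot() for a
 * 6-device md-layout raid6 stripe (not from the original source), with
 * pd_idx == 4 and qd_idx == 5, so syndrome_disks == 4:
 *
 *   walk starts at raid6_d0() == 0 (Q is the last device)
 *   device 0 -> slot 0        device 3 -> slot 3
 *   device 1 -> slot 1        device 4 -> slot 4 (P)
 *   device 2 -> slot 2        device 5 -> slot 5 (Q)
 *
 * With a rotated parity (e.g. pd_idx == 1, qd_idx == 2) the walk starts at
 * device 3 and the data devices still land in slots 0..3 in walk order.
 */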
static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;

	while (bi) {
		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bio_endio(bi, 0);
		bi = return_bi;
	}
}

static void print_raid5_conf (struct r5conf *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	BUG_ON(!list_empty(&sh->lru));
	BUG_ON(atomic_read(&conf->active_stripes) == 0);
	if (test_bit(STRIPE_HANDLE, &sh->state)) {
		if (test_bit(STRIPE_DELAYED, &sh->state) &&
		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			list_add_tail(&sh->lru, &conf->delayed_list);
		else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
			 sh->bm_seq - conf->seq_write > 0)
			list_add_tail(&sh->lru, &conf->bitmap_list);
		else {
			clear_bit(STRIPE_DELAYED, &sh->state);
			clear_bit(STRIPE_BIT_DELAY, &sh->state);
			list_add_tail(&sh->lru, &conf->handle_list);
		}
		md_wakeup_thread(conf->mddev->thread);
	} else {
		BUG_ON(stripe_operations_active(sh));
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			if (atomic_dec_return(&conf->preread_active_stripes)
			    < IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		atomic_dec(&conf->active_stripes);
		if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
			list_add_tail(&sh->lru, &conf->inactive_list);
			wake_up(&conf->wait_for_stripe);
			if (conf->retry_read_aligned)
				md_wakeup_thread(conf->mddev->thread);
		}
	}
}

static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count))
		do_release_stripe(conf, sh);
}

static void release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	unsigned long flags;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
		do_release_stripe(conf, sh);
		spin_unlock(&conf->device_lock);
	}
	local_irq_restore(flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_add_head(&sh->hash, hp);
}
/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(struct r5conf *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}

static void shrink_buffers(struct stripe_head *sh)
{
	struct page *p;
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh)
{
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}
static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
			    struct stripe_head *sh);

static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			WARN_ON(1);
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	insert_hash(conf, sh);
}

static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}
/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be insync in the section most affected by failed devices.
 */
static int calc_degraded(struct r5conf *conf)
{
	int degraded, degraded2;
	int i;

	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->previous_raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);

		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If the reshape increases the number of devices,
			 * this is being recovered by the reshape, so
			 * this 'previous' section is not in_sync.
			 * If the number of devices is being reduced however,
			 * the device can only be part of the array if
			 * we are reverting a reshape, so this section will
			 * be in-sync.
			 */
			if (conf->raid_disks >= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (conf->raid_disks == conf->previous_raid_disks)
		return degraded;
	rcu_read_lock();
	degraded2 = 0;
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);

		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded2++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If reshape increases the number of devices, this
			 * section has already been recovered, else it
			 * almost certainly hasn't.
			 */
			if (conf->raid_disks <= conf->previous_raid_disks)
				degraded2++;
	}
	rcu_read_unlock();
	if (degraded2 > degraded)
		return degraded2;
	return degraded;
}

static int has_failed(struct r5conf *conf)
{
	int degraded;

	if (conf->mddev->reshape_position == MaxSector)
		return conf->mddev->degraded > conf->max_degraded;

	degraded = calc_degraded(conf);
	if (degraded > conf->max_degraded)
		return 1;
	return 0;
}
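/*
 * Illustrative example of has_failed() (not from the original source):
 * for raid6, max_degraded is 2, so a 6-device array that has lost two
 * devices (calc_degraded() == 2) is still usable, while losing a third
 * (calc_degraded() == 3 > 2) makes has_failed() return 1.  For raid5,
 * max_degraded is 1, so any second failure fails the array.
 */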
static struct stripe_head *
get_active_stripe(struct r5conf *conf, sector_t sector,
		  int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0 || noquiesce,
				    conf->device_lock);
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes * 3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, previous);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru)
				    && !test_bit(STRIPE_EXPANDING, &sh->state)
				    && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}
/* Determine if 'data_offset' or 'new_data_offset' should be used
 * in this stripe_head.
 */
static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
{
	sector_t progress = conf->reshape_progress;
	/* Need a memory barrier to make sure we see the value
	 * of conf->generation, or ->data_offset that was set before
	 * reshape_progress was updated.
	 */
	smp_rmb();
	if (progress == MaxSector)
		return 0;
	if (sh->generation == conf->generation - 1)
		return 0;
	/* We are in a reshape, and this is a new-generation stripe,
	 * so use new_data_offset.
	 */
	return 1;
}
static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);

static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	struct r5conf *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		int replace_only = 0;
		struct bio *bi, *rbi;
		struct md_rdev *rdev, *rrdev = NULL;

		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
				rw = WRITE_FUA;
			else
				rw = WRITE;
			if (test_bit(R5_Discard, &sh->dev[i].flags))
				rw |= REQ_DISCARD;
		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else if (test_and_clear_bit(R5_WantReplace,
					    &sh->dev[i].flags)) {
			rw = WRITE;
			replace_only = 1;
		} else
			continue;
		if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
			rw |= REQ_SYNC;

		bi = &sh->dev[i].req;
		rbi = &sh->dev[i].rreq; /* For writing to replacement */

		bi->bi_rw = rw;
		rbi->bi_rw = rw;
		if (rw & WRITE) {
			bi->bi_end_io = raid5_end_write_request;
			rbi->bi_end_io = raid5_end_write_request;
		} else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rrdev = rcu_dereference(conf->disks[i].replacement);
		smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev) {
			rdev = rrdev;
			rrdev = NULL;
		}
		if (rw & WRITE) {
			if (replace_only)
				rdev = NULL;
			if (rdev == rrdev)
				/* We raced and saw duplicates */
				rrdev = NULL;
		} else {
			if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
				rdev = rrdev;
			rrdev = NULL;
		}

		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		if (rrdev && test_bit(Faulty, &rrdev->flags))
			rrdev = NULL;
		if (rrdev)
			atomic_inc(&rrdev->nr_pending);
		rcu_read_unlock();

		/* We have already checked bad blocks for reads.  Now
		 * need to check for writes.  We never accept write errors
		 * on the replacement, so we don't need to check rrdev.
		 */
		while ((rw & WRITE) && rdev &&
		       test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
					      &first_bad, &bad_sectors);
			if (!bad)
				break;

			if (bad < 0) {
				set_bit(BlockedBadBlocks, &rdev->flags);
				if (!conf->mddev->external &&
				    conf->mddev->flags) {
					/* It is very unlikely, but we might
					 * still need to write out the
					 * bad block log - better give it
					 * a chance */
					md_check_recovery(conf->mddev);
				}
				/*
				 * Because md_wait_for_blocked_rdev
				 * will dec nr_pending, we must
				 * increment it first.
				 */
				atomic_inc(&rdev->nr_pending);
				md_wait_for_blocked_rdev(rdev, conf->mddev);
			} else {
				/* Acknowledged bad block - skip the write */
				rdev_dec_pending(rdev, conf->mddev);
				rdev = NULL;
			}
		}

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded
			    || s->replacing)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bi->bi_bdev = rdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			if (use_new_offset(conf, sh))
				bi->bi_sector = (sh->sector
						 + rdev->new_data_offset);
			else
				bi->bi_sector = (sh->sector
						 + rdev->data_offset);
			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
				bi->bi_rw |= REQ_FLUSH;

			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_idx = 0;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if (rrdev)
				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
			generic_make_request(bi);
		}
		if (rrdev) {
			if (s->syncing || s->expanding || s->expanded
			    || s->replacing)
				md_sync_acct(rrdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			rbi->bi_bdev = rrdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on "
				 "replacement disc %d\n",
				__func__, (unsigned long long)sh->sector,
				rbi->bi_rw, i);
			atomic_inc(&sh->count);
			if (use_new_offset(conf, sh))
				rbi->bi_sector = (sh->sector
						  + rrdev->new_data_offset);
			else
				rbi->bi_sector = (sh->sector
						  + rrdev->data_offset);
			rbi->bi_flags = 1 << BIO_UPTODATE;
			rbi->bi_idx = 0;
			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			rbi->bi_io_vec[0].bv_offset = 0;
			rbi->bi_size = STRIPE_SIZE;
			rbi->bi_next = NULL;
			generic_make_request(rbi);
		}
		if (!rdev && !rrdev) {
			if (rw & WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;
	struct async_submit_ctl submit;
	enum async_tx_flags flags = 0;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;

	if (frombio)
		flags |= ASYNC_TX_FENCE;
	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

	bio_for_each_segment(bvl, bio, i) {
		int len = bvl->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bvl->bv_offset;
			bio_page = bvl->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
						  b_offset, clen, &submit);
			else
				tx = async_memcpy(bio_page, page, b_offset,
						  page_offset, clen, &submit);
		}
		/* chain the operations */
		submit.depend_tx = tx;

		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}

	return tx;
}
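/*
 * Worked example for the offset math above (illustrative only, assuming
 * STRIPE_SIZE == 4096): a bio starting at sector 18 copied against a
 * stripe page that begins at device sector 16 gets
 *   page_offset = (18 - 16) * 512 = 1024,
 * so its data lands 1KB into the stripe page.  If instead the bio starts
 * at sector 14, page_offset = (16 - 14) * -512 = -1024 and the first 1KB
 * of the bio (b_offset) is skipped before any copying takes place.
 */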
static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_active_stripes(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&sh->stripe_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&sh->stripe_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
						     dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
	async_trigger_callback(&submit);
}
static void mark_target_uptodate(struct stripe_head *sh, int target)
{
	struct r5dev *tgt;

	if (target < 0)
		return;

	tgt = &sh->dev[target];
	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
}

static void ops_complete_compute(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* mark the computed target(s) as uptodate */
	mark_target_uptodate(sh, sh->ops.target);
	mark_target_uptodate(sh, sh->ops.target2);

	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
				 struct raid5_percpu *percpu)
{
	return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}

static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}
/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = NULL;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		srcs[slot] = sh->dev[i].page;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	return syndrome_disks;
}
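/*
 * Resulting layout, shown for a 6-device md-layout raid6 stripe
 * (illustrative only): set_syndrome_sources() returns count == 4 and
 * fills srcs[] as
 *
 *   srcs[0..3]  data pages in walk order starting at raid6_d0()
 *   srcs[4]     the P page   (srcs[count])
 *   srcs[5]     the Q page   (srcs[count+1])
 *
 * which is exactly the source ordering async_gen_syndrome() expects when
 * it is called below with count+2 blocks.
 */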
static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **blocks = percpu->scribble;
	int target;
	int qd_idx = sh->qd_idx;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct r5dev *tgt;
	struct page *dest;
	int i;
	int count;

	if (sh->ops.target < 0)
		target = sh->ops.target2;
	else if (sh->ops.target2 < 0)
		target = sh->ops.target;
	else
		/* we should only have one valid target */
		BUG();
	BUG_ON(target < 0);
	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);

	tgt = &sh->dev[target];
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	dest = tgt->page;

	atomic_inc(&sh->count);

	if (target == qd_idx) {
		count = set_syndrome_sources(blocks, sh);
		blocks[count] = NULL; /* regenerating p is not necessary */
		BUG_ON(blocks[count+1] != dest); /* q should already be set */
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
	} else {
		/* Compute any data- or p-drive using XOR */
		count = 0;
		for (i = disks; i-- ; ) {
			if (i == target || i == qd_idx)
				continue;
			blocks[count++] = sh->dev[i].page;
		}

		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
				  NULL, ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
	}

	return tx;
}
  882. static struct dma_async_tx_descriptor *
  883. ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
  884. {
  885. int i, count, disks = sh->disks;
  886. int syndrome_disks = sh->ddf_layout ? disks : disks-2;
  887. int d0_idx = raid6_d0(sh);
  888. int faila = -1, failb = -1;
  889. int target = sh->ops.target;
  890. int target2 = sh->ops.target2;
  891. struct r5dev *tgt = &sh->dev[target];
  892. struct r5dev *tgt2 = &sh->dev[target2];
  893. struct dma_async_tx_descriptor *tx;
  894. struct page **blocks = percpu->scribble;
  895. struct async_submit_ctl submit;
  896. pr_debug("%s: stripe %llu block1: %d block2: %d\n",
  897. __func__, (unsigned long long)sh->sector, target, target2);
  898. BUG_ON(target < 0 || target2 < 0);
  899. BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
  900. BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
  901. /* we need to open-code set_syndrome_sources to handle the
  902. * slot number conversion for 'faila' and 'failb'
  903. */
  904. for (i = 0; i < disks ; i++)
  905. blocks[i] = NULL;
  906. count = 0;
  907. i = d0_idx;
  908. do {
  909. int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
  910. blocks[slot] = sh->dev[i].page;
  911. if (i == target)
  912. faila = slot;
  913. if (i == target2)
  914. failb = slot;
  915. i = raid6_next_disk(i, disks);
  916. } while (i != d0_idx);
  917. BUG_ON(faila == failb);
  918. if (failb < faila)
  919. swap(faila, failb);
  920. pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
  921. __func__, (unsigned long long)sh->sector, faila, failb);
  922. atomic_inc(&sh->count);
  923. if (failb == syndrome_disks+1) {
  924. /* Q disk is one of the missing disks */
  925. if (faila == syndrome_disks) {
  926. /* Missing P+Q, just recompute */
  927. init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
  928. ops_complete_compute, sh,
  929. to_addr_conv(sh, percpu));
  930. return async_gen_syndrome(blocks, 0, syndrome_disks+2,
  931. STRIPE_SIZE, &submit);
  932. } else {
  933. struct page *dest;
  934. int data_target;
  935. int qd_idx = sh->qd_idx;
  936. /* Missing D+Q: recompute D from P, then recompute Q */
  937. if (target == qd_idx)
  938. data_target = target2;
  939. else
  940. data_target = target;
  941. count = 0;
  942. for (i = disks; i-- ; ) {
  943. if (i == data_target || i == qd_idx)
  944. continue;
  945. blocks[count++] = sh->dev[i].page;
  946. }
  947. dest = sh->dev[data_target].page;
  948. init_async_submit(&submit,
  949. ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
  950. NULL, NULL, NULL,
  951. to_addr_conv(sh, percpu));
  952. tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
  953. &submit);
  954. count = set_syndrome_sources(blocks, sh);
  955. init_async_submit(&submit, ASYNC_TX_FENCE, tx,
  956. ops_complete_compute, sh,
  957. to_addr_conv(sh, percpu));
  958. return async_gen_syndrome(blocks, 0, count+2,
  959. STRIPE_SIZE, &submit);
  960. }
  961. } else {
  962. init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
  963. ops_complete_compute, sh,
  964. to_addr_conv(sh, percpu));
  965. if (failb == syndrome_disks) {
  966. /* We're missing D+P. */
  967. return async_raid6_datap_recov(syndrome_disks+2,
  968. STRIPE_SIZE, faila,
  969. blocks, &submit);
  970. } else {
  971. /* We're missing D+D. */
  972. return async_raid6_2data_recov(syndrome_disks+2,
  973. STRIPE_SIZE, faila, failb,
  974. blocks, &submit);
  975. }
  976. }
  977. }
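/*
 * Editor's note on the D+Q branch above: since P is the XOR of all data
 * blocks (P = D0 ^ D1 ^ ... ^ Dn-1), a single missing data block can be
 * rebuilt without Q:
 *
 *     Dmiss = P ^ (XOR of all surviving data blocks)
 *
 * which is exactly the async_xor() pass over 'blocks' that skips the data
 * target and qd_idx.  Q is then regenerated over the whole stripe with
 * async_gen_syndrome() once D is back in place.
 */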
  978. static void ops_complete_prexor(void *stripe_head_ref)
  979. {
  980. struct stripe_head *sh = stripe_head_ref;
  981. pr_debug("%s: stripe %llu\n", __func__,
  982. (unsigned long long)sh->sector);
  983. }
  984. static struct dma_async_tx_descriptor *
  985. ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
  986. struct dma_async_tx_descriptor *tx)
  987. {
  988. int disks = sh->disks;
  989. struct page **xor_srcs = percpu->scribble;
  990. int count = 0, pd_idx = sh->pd_idx, i;
  991. struct async_submit_ctl submit;
  992. /* existing parity data subtracted */
  993. struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
  994. pr_debug("%s: stripe %llu\n", __func__,
  995. (unsigned long long)sh->sector);
  996. for (i = disks; i--; ) {
  997. struct r5dev *dev = &sh->dev[i];
  998. /* Only process blocks that are known to be uptodate */
  999. if (test_bit(R5_Wantdrain, &dev->flags))
  1000. xor_srcs[count++] = dev->page;
  1001. }
  1002. init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
  1003. ops_complete_prexor, sh, to_addr_conv(sh, percpu));
  1004. tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
  1005. return tx;
  1006. }
  1007. static struct dma_async_tx_descriptor *
  1008. ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
  1009. {
  1010. int disks = sh->disks;
  1011. int i;
  1012. pr_debug("%s: stripe %llu\n", __func__,
  1013. (unsigned long long)sh->sector);
  1014. for (i = disks; i--; ) {
  1015. struct r5dev *dev = &sh->dev[i];
  1016. struct bio *chosen;
  1017. if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
  1018. struct bio *wbi;
  1019. spin_lock_irq(&sh->stripe_lock);
  1020. chosen = dev->towrite;
  1021. dev->towrite = NULL;
  1022. BUG_ON(dev->written);
  1023. wbi = dev->written = chosen;
  1024. spin_unlock_irq(&sh->stripe_lock);
  1025. while (wbi && wbi->bi_sector <
  1026. dev->sector + STRIPE_SECTORS) {
  1027. if (wbi->bi_rw & REQ_FUA)
  1028. set_bit(R5_WantFUA, &dev->flags);
  1029. if (wbi->bi_rw & REQ_SYNC)
  1030. set_bit(R5_SyncIO, &dev->flags);
  1031. if (wbi->bi_rw & REQ_DISCARD)
  1032. set_bit(R5_Discard, &dev->flags);
  1033. else
  1034. tx = async_copy_data(1, wbi, dev->page,
  1035. dev->sector, tx);
  1036. wbi = r5_next_bio(wbi, dev->sector);
  1037. }
  1038. }
  1039. }
  1040. return tx;
  1041. }
  1042. static void ops_complete_reconstruct(void *stripe_head_ref)
  1043. {
  1044. struct stripe_head *sh = stripe_head_ref;
  1045. int disks = sh->disks;
  1046. int pd_idx = sh->pd_idx;
  1047. int qd_idx = sh->qd_idx;
  1048. int i;
  1049. bool fua = false, sync = false, discard = false;
  1050. pr_debug("%s: stripe %llu\n", __func__,
  1051. (unsigned long long)sh->sector);
  1052. for (i = disks; i--; ) {
  1053. fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
  1054. sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
  1055. discard |= test_bit(R5_Discard, &sh->dev[i].flags);
  1056. }
  1057. for (i = disks; i--; ) {
  1058. struct r5dev *dev = &sh->dev[i];
  1059. if (dev->written || i == pd_idx || i == qd_idx) {
  1060. if (!discard)
  1061. set_bit(R5_UPTODATE, &dev->flags);
  1062. if (fua)
  1063. set_bit(R5_WantFUA, &dev->flags);
  1064. if (sync)
  1065. set_bit(R5_SyncIO, &dev->flags);
  1066. }
  1067. }
  1068. if (sh->reconstruct_state == reconstruct_state_drain_run)
  1069. sh->reconstruct_state = reconstruct_state_drain_result;
  1070. else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
  1071. sh->reconstruct_state = reconstruct_state_prexor_drain_result;
  1072. else {
  1073. BUG_ON(sh->reconstruct_state != reconstruct_state_run);
  1074. sh->reconstruct_state = reconstruct_state_result;
  1075. }
  1076. set_bit(STRIPE_HANDLE, &sh->state);
  1077. release_stripe(sh);
  1078. }
  1079. static void
  1080. ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
  1081. struct dma_async_tx_descriptor *tx)
  1082. {
  1083. int disks = sh->disks;
  1084. struct page **xor_srcs = percpu->scribble;
  1085. struct async_submit_ctl submit;
  1086. int count = 0, pd_idx = sh->pd_idx, i;
  1087. struct page *xor_dest;
  1088. int prexor = 0;
  1089. unsigned long flags;
  1090. pr_debug("%s: stripe %llu\n", __func__,
  1091. (unsigned long long)sh->sector);
  1092. for (i = 0; i < sh->disks; i++) {
  1093. if (pd_idx == i)
  1094. continue;
  1095. if (!test_bit(R5_Discard, &sh->dev[i].flags))
  1096. break;
  1097. }
  1098. if (i >= sh->disks) {
  1099. atomic_inc(&sh->count);
  1100. set_bit(R5_Discard, &sh->dev[pd_idx].flags);
  1101. ops_complete_reconstruct(sh);
  1102. return;
  1103. }
  1104. /* check if prexor is active which means only process blocks
  1105. * that are part of a read-modify-write (written)
  1106. */
  1107. if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
  1108. prexor = 1;
  1109. xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
  1110. for (i = disks; i--; ) {
  1111. struct r5dev *dev = &sh->dev[i];
  1112. if (dev->written)
  1113. xor_srcs[count++] = dev->page;
  1114. }
  1115. } else {
  1116. xor_dest = sh->dev[pd_idx].page;
  1117. for (i = disks; i--; ) {
  1118. struct r5dev *dev = &sh->dev[i];
  1119. if (i != pd_idx)
  1120. xor_srcs[count++] = dev->page;
  1121. }
  1122. }
  1123. /* 1/ if we prexor'd then the dest is reused as a source
  1124. * 2/ if we did not prexor then we are redoing the parity
  1125. * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
  1126. * for the synchronous xor case
  1127. */
  1128. flags = ASYNC_TX_ACK |
  1129. (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
  1130. atomic_inc(&sh->count);
  1131. init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
  1132. to_addr_conv(sh, percpu));
  1133. if (unlikely(count == 1))
  1134. tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
  1135. else
  1136. tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
  1137. }
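/*
 * Editor's sketch (plain C, not driver code): the read-modify-write parity
 * update performed by ops_run_prexor() followed by ops_run_reconstruct5()
 * reduces to
 *
 *     P_new = P_old ^ D_old ^ D_new
 *
 * shown here on single bytes; the values below are made up for illustration.
 */
static unsigned char rmw_parity_byte(unsigned char p_old,
				     unsigned char d_old,
				     unsigned char d_new)
{
	unsigned char p = p_old ^ d_old;	/* prexor: drop the old data */
	return p ^ d_new;			/* drain/reconstruct: add the new data */
}
/*
 * e.g. for two data bytes D0=0x0f, D1=0x30, P=0x3f, updating D0 to 0xf0:
 * rmw_parity_byte(0x3f, 0x0f, 0xf0) == 0xc0, identical to recomputing
 * 0xf0 ^ 0x30 from scratch.
 */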
  1138. static void
  1139. ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
  1140. struct dma_async_tx_descriptor *tx)
  1141. {
  1142. struct async_submit_ctl submit;
  1143. struct page **blocks = percpu->scribble;
  1144. int count, i;
  1145. pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
  1146. for (i = 0; i < sh->disks; i++) {
  1147. if (sh->pd_idx == i || sh->qd_idx == i)
  1148. continue;
  1149. if (!test_bit(R5_Discard, &sh->dev[i].flags))
  1150. break;
  1151. }
  1152. if (i >= sh->disks) {
  1153. atomic_inc(&sh->count);
  1154. set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
  1155. set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
  1156. ops_complete_reconstruct(sh);
  1157. return;
  1158. }
  1159. count = set_syndrome_sources(blocks, sh);
  1160. atomic_inc(&sh->count);
  1161. init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
  1162. sh, to_addr_conv(sh, percpu));
  1163. async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
  1164. }
  1165. static void ops_complete_check(void *stripe_head_ref)
  1166. {
  1167. struct stripe_head *sh = stripe_head_ref;
  1168. pr_debug("%s: stripe %llu\n", __func__,
  1169. (unsigned long long)sh->sector);
  1170. sh->check_state = check_state_check_result;
  1171. set_bit(STRIPE_HANDLE, &sh->state);
  1172. release_stripe(sh);
  1173. }
  1174. static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
  1175. {
  1176. int disks = sh->disks;
  1177. int pd_idx = sh->pd_idx;
  1178. int qd_idx = sh->qd_idx;
  1179. struct page *xor_dest;
  1180. struct page **xor_srcs = percpu->scribble;
  1181. struct dma_async_tx_descriptor *tx;
  1182. struct async_submit_ctl submit;
  1183. int count;
  1184. int i;
  1185. pr_debug("%s: stripe %llu\n", __func__,
  1186. (unsigned long long)sh->sector);
  1187. count = 0;
  1188. xor_dest = sh->dev[pd_idx].page;
  1189. xor_srcs[count++] = xor_dest;
  1190. for (i = disks; i--; ) {
  1191. if (i == pd_idx || i == qd_idx)
  1192. continue;
  1193. xor_srcs[count++] = sh->dev[i].page;
  1194. }
  1195. init_async_submit(&submit, 0, NULL, NULL, NULL,
  1196. to_addr_conv(sh, percpu));
  1197. tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
  1198. &sh->ops.zero_sum_result, &submit);
  1199. atomic_inc(&sh->count);
  1200. init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
  1201. tx = async_trigger_callback(&submit);
  1202. }
  1203. static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
  1204. {
  1205. struct page **srcs = percpu->scribble;
  1206. struct async_submit_ctl submit;
  1207. int count;
  1208. pr_debug("%s: stripe %llu checkp: %d\n", __func__,
  1209. (unsigned long long)sh->sector, checkp);
  1210. count = set_syndrome_sources(srcs, sh);
  1211. if (!checkp)
  1212. srcs[count] = NULL;
  1213. atomic_inc(&sh->count);
  1214. init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
  1215. sh, to_addr_conv(sh, percpu));
  1216. async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
  1217. &sh->ops.zero_sum_result, percpu->spare_page, &submit);
  1218. }
  1219. static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
  1220. {
  1221. int overlap_clear = 0, i, disks = sh->disks;
  1222. struct dma_async_tx_descriptor *tx = NULL;
  1223. struct r5conf *conf = sh->raid_conf;
  1224. int level = conf->level;
  1225. struct raid5_percpu *percpu;
  1226. unsigned long cpu;
  1227. cpu = get_cpu();
  1228. percpu = per_cpu_ptr(conf->percpu, cpu);
  1229. if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
  1230. ops_run_biofill(sh);
  1231. overlap_clear++;
  1232. }
  1233. if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
  1234. if (level < 6)
  1235. tx = ops_run_compute5(sh, percpu);
  1236. else {
  1237. if (sh->ops.target2 < 0 || sh->ops.target < 0)
  1238. tx = ops_run_compute6_1(sh, percpu);
  1239. else
  1240. tx = ops_run_compute6_2(sh, percpu);
  1241. }
  1242. /* terminate the chain if reconstruct is not set to be run */
  1243. if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
  1244. async_tx_ack(tx);
  1245. }
  1246. if (test_bit(STRIPE_OP_PREXOR, &ops_request))
  1247. tx = ops_run_prexor(sh, percpu, tx);
  1248. if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
  1249. tx = ops_run_biodrain(sh, tx);
  1250. overlap_clear++;
  1251. }
  1252. if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
  1253. if (level < 6)
  1254. ops_run_reconstruct5(sh, percpu, tx);
  1255. else
  1256. ops_run_reconstruct6(sh, percpu, tx);
  1257. }
  1258. if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
  1259. if (sh->check_state == check_state_run)
  1260. ops_run_check_p(sh, percpu);
  1261. else if (sh->check_state == check_state_run_q)
  1262. ops_run_check_pq(sh, percpu, 0);
  1263. else if (sh->check_state == check_state_run_pq)
  1264. ops_run_check_pq(sh, percpu, 1);
  1265. else
  1266. BUG();
  1267. }
  1268. if (overlap_clear)
  1269. for (i = disks; i--; ) {
  1270. struct r5dev *dev = &sh->dev[i];
  1271. if (test_and_clear_bit(R5_Overlap, &dev->flags))
  1272. wake_up(&sh->raid_conf->wait_for_overlap);
  1273. }
  1274. put_cpu();
  1275. }
  1276. #ifdef CONFIG_MULTICORE_RAID456
  1277. static void async_run_ops(void *param, async_cookie_t cookie)
  1278. {
  1279. struct stripe_head *sh = param;
  1280. unsigned long ops_request = sh->ops.request;
  1281. clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
  1282. wake_up(&sh->ops.wait_for_ops);
  1283. __raid_run_ops(sh, ops_request);
  1284. release_stripe(sh);
  1285. }
  1286. static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
  1287. {
  1288. /* since handle_stripe can be called outside of raid5d context
  1289. * we need to ensure sh->ops.request is de-staged before another
  1290. * request arrives
  1291. */
  1292. wait_event(sh->ops.wait_for_ops,
  1293. !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
  1294. sh->ops.request = ops_request;
  1295. atomic_inc(&sh->count);
  1296. async_schedule(async_run_ops, sh);
  1297. }
  1298. #else
  1299. #define raid_run_ops __raid_run_ops
  1300. #endif
  1301. static int grow_one_stripe(struct r5conf *conf)
  1302. {
  1303. struct stripe_head *sh;
  1304. sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
  1305. if (!sh)
  1306. return 0;
  1307. sh->raid_conf = conf;
  1308. #ifdef CONFIG_MULTICORE_RAID456
  1309. init_waitqueue_head(&sh->ops.wait_for_ops);
  1310. #endif
  1311. spin_lock_init(&sh->stripe_lock);
  1312. if (grow_buffers(sh)) {
  1313. shrink_buffers(sh);
  1314. kmem_cache_free(conf->slab_cache, sh);
  1315. return 0;
  1316. }
  1317. /* we just created an active stripe so... */
  1318. atomic_set(&sh->count, 1);
  1319. atomic_inc(&conf->active_stripes);
  1320. INIT_LIST_HEAD(&sh->lru);
  1321. release_stripe(sh);
  1322. return 1;
  1323. }
  1324. static int grow_stripes(struct r5conf *conf, int num)
  1325. {
  1326. struct kmem_cache *sc;
  1327. int devs = max(conf->raid_disks, conf->previous_raid_disks);
  1328. if (conf->mddev->gendisk)
  1329. sprintf(conf->cache_name[0],
  1330. "raid%d-%s", conf->level, mdname(conf->mddev));
  1331. else
  1332. sprintf(conf->cache_name[0],
  1333. "raid%d-%p", conf->level, conf->mddev);
  1334. sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
  1335. conf->active_name = 0;
  1336. sc = kmem_cache_create(conf->cache_name[conf->active_name],
  1337. sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
  1338. 0, 0, NULL);
  1339. if (!sc)
  1340. return 1;
  1341. conf->slab_cache = sc;
  1342. conf->pool_size = devs;
  1343. while (num--)
  1344. if (!grow_one_stripe(conf))
  1345. return 1;
  1346. return 0;
  1347. }
  1348. /**
  1349. * scribble_len - return the required size of the scribble region
  1350. * @num - total number of disks in the array
  1351. *
  1352. * The size must be enough to contain:
  1353. * 1/ a struct page pointer for each device in the array +2
  1354. * 2/ room to convert each entry in (1) to its corresponding dma
  1355. * (dma_map_page()) or page (page_address()) address.
  1356. *
  1357. * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
  1358. * calculate over all devices (not just the data blocks), using zeros in place
  1359. * of the P and Q blocks.
  1360. */
  1361. static size_t scribble_len(int num)
  1362. {
  1363. size_t len;
  1364. len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
  1365. return len;
  1366. }
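/*
 * Editor's sketch of the layout described above: one scribble_len(num)
 * buffer holding (num+2) struct page pointers followed by (num+2)
 * addr_conv_t slots.  The assumption that the conversion area starts
 * immediately after the pointer array is the editor's; to_addr_conv()
 * earlier in this file is the authoritative helper.
 */
static addr_conv_t *example_scribble_addr_conv(void *scribble, int num)
{
	struct page **pages = scribble;

	return (addr_conv_t *)(pages + num + 2);
}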
  1367. static int resize_stripes(struct r5conf *conf, int newsize)
  1368. {
  1369. /* Make all the stripes able to hold 'newsize' devices.
  1370. * New slots in each stripe get 'page' set to a new page.
  1371. *
  1372. * This happens in stages:
  1373. * 1/ create a new kmem_cache and allocate the required number of
  1374. * stripe_heads.
  1375. * 2/ gather all the old stripe_heads and transfer the pages across
  1376. * to the new stripe_heads. This will have the side effect of
  1377. * freezing the array as once all stripe_heads have been collected,
  1378. * no IO will be possible. Old stripe heads are freed once their
  1379. * pages have been transferred over, and the old kmem_cache is
  1380. * freed when all stripes are done.
1381. * 3/ reallocate conf->disks to be suitably bigger. If this fails,
1382. * we simply return a failure status - no need to clean anything up.
  1383. * 4/ allocate new pages for the new slots in the new stripe_heads.
1384. * If this fails, we don't bother trying to shrink the
  1385. * stripe_heads down again, we just leave them as they are.
  1386. * As each stripe_head is processed the new one is released into
  1387. * active service.
  1388. *
  1389. * Once step2 is started, we cannot afford to wait for a write,
  1390. * so we use GFP_NOIO allocations.
  1391. */
  1392. struct stripe_head *osh, *nsh;
  1393. LIST_HEAD(newstripes);
  1394. struct disk_info *ndisks;
  1395. unsigned long cpu;
  1396. int err;
  1397. struct kmem_cache *sc;
  1398. int i;
  1399. if (newsize <= conf->pool_size)
  1400. return 0; /* never bother to shrink */
  1401. err = md_allow_write(conf->mddev);
  1402. if (err)
  1403. return err;
  1404. /* Step 1 */
  1405. sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
  1406. sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
  1407. 0, 0, NULL);
  1408. if (!sc)
  1409. return -ENOMEM;
  1410. for (i = conf->max_nr_stripes; i; i--) {
  1411. nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
  1412. if (!nsh)
  1413. break;
  1414. nsh->raid_conf = conf;
  1415. #ifdef CONFIG_MULTICORE_RAID456
  1416. init_waitqueue_head(&nsh->ops.wait_for_ops);
  1417. #endif
  1418. spin_lock_init(&nsh->stripe_lock);
  1419. list_add(&nsh->lru, &newstripes);
  1420. }
  1421. if (i) {
  1422. /* didn't get enough, give up */
  1423. while (!list_empty(&newstripes)) {
  1424. nsh = list_entry(newstripes.next, struct stripe_head, lru);
  1425. list_del(&nsh->lru);
  1426. kmem_cache_free(sc, nsh);
  1427. }
  1428. kmem_cache_destroy(sc);
  1429. return -ENOMEM;
  1430. }
  1431. /* Step 2 - Must use GFP_NOIO now.
  1432. * OK, we have enough stripes, start collecting inactive
  1433. * stripes and copying them over
  1434. */
  1435. list_for_each_entry(nsh, &newstripes, lru) {
  1436. spin_lock_irq(&conf->device_lock);
  1437. wait_event_lock_irq(conf->wait_for_stripe,
  1438. !list_empty(&conf->inactive_list),
  1439. conf->device_lock);
  1440. osh = get_free_stripe(conf);
  1441. spin_unlock_irq(&conf->device_lock);
  1442. atomic_set(&nsh->count, 1);
  1443. for(i=0; i<conf->pool_size; i++)
  1444. nsh->dev[i].page = osh->dev[i].page;
  1445. for( ; i<newsize; i++)
  1446. nsh->dev[i].page = NULL;
  1447. kmem_cache_free(conf->slab_cache, osh);
  1448. }
  1449. kmem_cache_destroy(conf->slab_cache);
  1450. /* Step 3.
  1451. * At this point, we are holding all the stripes so the array
  1452. * is completely stalled, so now is a good time to resize
  1453. * conf->disks and the scribble region
  1454. */
  1455. ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
  1456. if (ndisks) {
  1457. for (i=0; i<conf->raid_disks; i++)
  1458. ndisks[i] = conf->disks[i];
  1459. kfree(conf->disks);
  1460. conf->disks = ndisks;
  1461. } else
  1462. err = -ENOMEM;
  1463. get_online_cpus();
  1464. conf->scribble_len = scribble_len(newsize);
  1465. for_each_present_cpu(cpu) {
  1466. struct raid5_percpu *percpu;
  1467. void *scribble;
  1468. percpu = per_cpu_ptr(conf->percpu, cpu);
  1469. scribble = kmalloc(conf->scribble_len, GFP_NOIO);
  1470. if (scribble) {
  1471. kfree(percpu->scribble);
  1472. percpu->scribble = scribble;
  1473. } else {
  1474. err = -ENOMEM;
  1475. break;
  1476. }
  1477. }
  1478. put_online_cpus();
  1479. /* Step 4, return new stripes to service */
  1480. while(!list_empty(&newstripes)) {
  1481. nsh = list_entry(newstripes.next, struct stripe_head, lru);
  1482. list_del_init(&nsh->lru);
  1483. for (i=conf->raid_disks; i < newsize; i++)
  1484. if (nsh->dev[i].page == NULL) {
  1485. struct page *p = alloc_page(GFP_NOIO);
  1486. nsh->dev[i].page = p;
  1487. if (!p)
  1488. err = -ENOMEM;
  1489. }
  1490. release_stripe(nsh);
  1491. }
1492. /* critical section passed, GFP_NOIO no longer needed */
  1493. conf->slab_cache = sc;
  1494. conf->active_name = 1-conf->active_name;
  1495. conf->pool_size = newsize;
  1496. return err;
  1497. }
  1498. static int drop_one_stripe(struct r5conf *conf)
  1499. {
  1500. struct stripe_head *sh;
  1501. spin_lock_irq(&conf->device_lock);
  1502. sh = get_free_stripe(conf);
  1503. spin_unlock_irq(&conf->device_lock);
  1504. if (!sh)
  1505. return 0;
  1506. BUG_ON(atomic_read(&sh->count));
  1507. shrink_buffers(sh);
  1508. kmem_cache_free(conf->slab_cache, sh);
  1509. atomic_dec(&conf->active_stripes);
  1510. return 1;
  1511. }
  1512. static void shrink_stripes(struct r5conf *conf)
  1513. {
  1514. while (drop_one_stripe(conf))
  1515. ;
  1516. if (conf->slab_cache)
  1517. kmem_cache_destroy(conf->slab_cache);
  1518. conf->slab_cache = NULL;
  1519. }
  1520. static void raid5_end_read_request(struct bio * bi, int error)
  1521. {
  1522. struct stripe_head *sh = bi->bi_private;
  1523. struct r5conf *conf = sh->raid_conf;
  1524. int disks = sh->disks, i;
  1525. int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
  1526. char b[BDEVNAME_SIZE];
  1527. struct md_rdev *rdev = NULL;
  1528. sector_t s;
  1529. for (i=0 ; i<disks; i++)
  1530. if (bi == &sh->dev[i].req)
  1531. break;
  1532. pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
  1533. (unsigned long long)sh->sector, i, atomic_read(&sh->count),
  1534. uptodate);
  1535. if (i == disks) {
  1536. BUG();
  1537. return;
  1538. }
  1539. if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
  1540. /* If replacement finished while this request was outstanding,
  1541. * 'replacement' might be NULL already.
  1542. * In that case it moved down to 'rdev'.
  1543. * rdev is not removed until all requests are finished.
  1544. */
  1545. rdev = conf->disks[i].replacement;
  1546. if (!rdev)
  1547. rdev = conf->disks[i].rdev;
  1548. if (use_new_offset(conf, sh))
  1549. s = sh->sector + rdev->new_data_offset;
  1550. else
  1551. s = sh->sector + rdev->data_offset;
  1552. if (uptodate) {
  1553. set_bit(R5_UPTODATE, &sh->dev[i].flags);
  1554. if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
  1555. /* Note that this cannot happen on a
  1556. * replacement device. We just fail those on
  1557. * any error
  1558. */
  1559. printk_ratelimited(
  1560. KERN_INFO
  1561. "md/raid:%s: read error corrected"
  1562. " (%lu sectors at %llu on %s)\n",
  1563. mdname(conf->mddev), STRIPE_SECTORS,
  1564. (unsigned long long)s,
  1565. bdevname(rdev->bdev, b));
  1566. atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
  1567. clear_bit(R5_ReadError, &sh->dev[i].flags);
  1568. clear_bit(R5_ReWrite, &sh->dev[i].flags);
  1569. } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
  1570. clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
  1571. if (atomic_read(&rdev->read_errors))
  1572. atomic_set(&rdev->read_errors, 0);
  1573. } else {
  1574. const char *bdn = bdevname(rdev->bdev, b);
  1575. int retry = 0;
  1576. int set_bad = 0;
  1577. clear_bit(R5_UPTODATE, &sh->dev[i].flags);
  1578. atomic_inc(&rdev->read_errors);
  1579. if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
  1580. printk_ratelimited(
  1581. KERN_WARNING
  1582. "md/raid:%s: read error on replacement device "
  1583. "(sector %llu on %s).\n",
  1584. mdname(conf->mddev),
  1585. (unsigned long long)s,
  1586. bdn);
  1587. else if (conf->mddev->degraded >= conf->max_degraded) {
  1588. set_bad = 1;
  1589. printk_ratelimited(
  1590. KERN_WARNING
  1591. "md/raid:%s: read error not correctable "
  1592. "(sector %llu on %s).\n",
  1593. mdname(conf->mddev),
  1594. (unsigned long long)s,
  1595. bdn);
  1596. } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
  1597. /* Oh, no!!! */
  1598. set_bad = 1;
  1599. printk_ratelimited(
  1600. KERN_WARNING
  1601. "md/raid:%s: read error NOT corrected!! "
  1602. "(sector %llu on %s).\n",
  1603. mdname(conf->mddev),
  1604. (unsigned long long)s,
  1605. bdn);
  1606. } else if (atomic_read(&rdev->read_errors)
  1607. > conf->max_nr_stripes)
  1608. printk(KERN_WARNING
  1609. "md/raid:%s: Too many read errors, failing device %s.\n",
  1610. mdname(conf->mddev), bdn);
  1611. else
  1612. retry = 1;
  1613. if (retry)
  1614. if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
  1615. set_bit(R5_ReadError, &sh->dev[i].flags);
  1616. clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
  1617. } else
  1618. set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
  1619. else {
  1620. clear_bit(R5_ReadError, &sh->dev[i].flags);
  1621. clear_bit(R5_ReWrite, &sh->dev[i].flags);
  1622. if (!(set_bad
  1623. && test_bit(In_sync, &rdev->flags)
  1624. && rdev_set_badblocks(
  1625. rdev, sh->sector, STRIPE_SECTORS, 0)))
  1626. md_error(conf->mddev, rdev);
  1627. }
  1628. }
  1629. rdev_dec_pending(rdev, conf->mddev);
  1630. clear_bit(R5_LOCKED, &sh->dev[i].flags);
  1631. set_bit(STRIPE_HANDLE, &sh->state);
  1632. release_stripe(sh);
  1633. }
  1634. static void raid5_end_write_request(struct bio *bi, int error)
  1635. {
  1636. struct stripe_head *sh = bi->bi_private;
  1637. struct r5conf *conf = sh->raid_conf;
  1638. int disks = sh->disks, i;
  1639. struct md_rdev *uninitialized_var(rdev);
  1640. int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
  1641. sector_t first_bad;
  1642. int bad_sectors;
  1643. int replacement = 0;
  1644. for (i = 0 ; i < disks; i++) {
  1645. if (bi == &sh->dev[i].req) {
  1646. rdev = conf->disks[i].rdev;
  1647. break;
  1648. }
  1649. if (bi == &sh->dev[i].rreq) {
  1650. rdev = conf->disks[i].replacement;
  1651. if (rdev)
  1652. replacement = 1;
  1653. else
  1654. /* rdev was removed and 'replacement'
  1655. * replaced it. rdev is not removed
  1656. * until all requests are finished.
  1657. */
  1658. rdev = conf->disks[i].rdev;
  1659. break;
  1660. }
  1661. }
  1662. pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
  1663. (unsigned long long)sh->sector, i, atomic_read(&sh->count),
  1664. uptodate);
  1665. if (i == disks) {
  1666. BUG();
  1667. return;
  1668. }
  1669. if (replacement) {
  1670. if (!uptodate)
  1671. md_error(conf->mddev, rdev);
  1672. else if (is_badblock(rdev, sh->sector,
  1673. STRIPE_SECTORS,
  1674. &first_bad, &bad_sectors))
  1675. set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
  1676. } else {
  1677. if (!uptodate) {
  1678. set_bit(WriteErrorSeen, &rdev->flags);
  1679. set_bit(R5_WriteError, &sh->dev[i].flags);
  1680. if (!test_and_set_bit(WantReplacement, &rdev->flags))
  1681. set_bit(MD_RECOVERY_NEEDED,
  1682. &rdev->mddev->recovery);
  1683. } else if (is_badblock(rdev, sh->sector,
  1684. STRIPE_SECTORS,
  1685. &first_bad, &bad_sectors))
  1686. set_bit(R5_MadeGood, &sh->dev[i].flags);
  1687. }
  1688. rdev_dec_pending(rdev, conf->mddev);
  1689. if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
  1690. clear_bit(R5_LOCKED, &sh->dev[i].flags);
  1691. set_bit(STRIPE_HANDLE, &sh->state);
  1692. release_stripe(sh);
  1693. }
  1694. static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
  1695. static void raid5_build_block(struct stripe_head *sh, int i, int previous)
  1696. {
  1697. struct r5dev *dev = &sh->dev[i];
  1698. bio_init(&dev->req);
  1699. dev->req.bi_io_vec = &dev->vec;
  1700. dev->req.bi_vcnt++;
  1701. dev->req.bi_max_vecs++;
  1702. dev->req.bi_private = sh;
  1703. dev->vec.bv_page = dev->page;
  1704. bio_init(&dev->rreq);
  1705. dev->rreq.bi_io_vec = &dev->rvec;
  1706. dev->rreq.bi_vcnt++;
  1707. dev->rreq.bi_max_vecs++;
  1708. dev->rreq.bi_private = sh;
  1709. dev->rvec.bv_page = dev->page;
  1710. dev->flags = 0;
  1711. dev->sector = compute_blocknr(sh, i, previous);
  1712. }
  1713. static void error(struct mddev *mddev, struct md_rdev *rdev)
  1714. {
  1715. char b[BDEVNAME_SIZE];
  1716. struct r5conf *conf = mddev->private;
  1717. unsigned long flags;
  1718. pr_debug("raid456: error called\n");
  1719. spin_lock_irqsave(&conf->device_lock, flags);
  1720. clear_bit(In_sync, &rdev->flags);
  1721. mddev->degraded = calc_degraded(conf);
  1722. spin_unlock_irqrestore(&conf->device_lock, flags);
  1723. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  1724. set_bit(Blocked, &rdev->flags);
  1725. set_bit(Faulty, &rdev->flags);
  1726. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  1727. printk(KERN_ALERT
  1728. "md/raid:%s: Disk failure on %s, disabling device.\n"
  1729. "md/raid:%s: Operation continuing on %d devices.\n",
  1730. mdname(mddev),
  1731. bdevname(rdev->bdev, b),
  1732. mdname(mddev),
  1733. conf->raid_disks - mddev->degraded);
  1734. }
  1735. /*
  1736. * Input: a 'big' sector number,
  1737. * Output: index of the data and parity disk, and the sector # in them.
  1738. */
  1739. static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
  1740. int previous, int *dd_idx,
  1741. struct stripe_head *sh)
  1742. {
  1743. sector_t stripe, stripe2;
  1744. sector_t chunk_number;
  1745. unsigned int chunk_offset;
  1746. int pd_idx, qd_idx;
  1747. int ddf_layout = 0;
  1748. sector_t new_sector;
  1749. int algorithm = previous ? conf->prev_algo
  1750. : conf->algorithm;
  1751. int sectors_per_chunk = previous ? conf->prev_chunk_sectors
  1752. : conf->chunk_sectors;
  1753. int raid_disks = previous ? conf->previous_raid_disks
  1754. : conf->raid_disks;
  1755. int data_disks = raid_disks - conf->max_degraded;
  1756. /* First compute the information on this sector */
  1757. /*
  1758. * Compute the chunk number and the sector offset inside the chunk
  1759. */
  1760. chunk_offset = sector_div(r_sector, sectors_per_chunk);
  1761. chunk_number = r_sector;
  1762. /*
  1763. * Compute the stripe number
  1764. */
  1765. stripe = chunk_number;
  1766. *dd_idx = sector_div(stripe, data_disks);
  1767. stripe2 = stripe;
  1768. /*
  1769. * Select the parity disk based on the user selected algorithm.
  1770. */
  1771. pd_idx = qd_idx = -1;
  1772. switch(conf->level) {
  1773. case 4:
  1774. pd_idx = data_disks;
  1775. break;
  1776. case 5:
  1777. switch (algorithm) {
  1778. case ALGORITHM_LEFT_ASYMMETRIC:
  1779. pd_idx = data_disks - sector_div(stripe2, raid_disks);
  1780. if (*dd_idx >= pd_idx)
  1781. (*dd_idx)++;
  1782. break;
  1783. case ALGORITHM_RIGHT_ASYMMETRIC:
  1784. pd_idx = sector_div(stripe2, raid_disks);
  1785. if (*dd_idx >= pd_idx)
  1786. (*dd_idx)++;
  1787. break;
  1788. case ALGORITHM_LEFT_SYMMETRIC:
  1789. pd_idx = data_disks - sector_div(stripe2, raid_disks);
  1790. *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
  1791. break;
  1792. case ALGORITHM_RIGHT_SYMMETRIC:
  1793. pd_idx = sector_div(stripe2, raid_disks);
  1794. *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
  1795. break;
  1796. case ALGORITHM_PARITY_0:
  1797. pd_idx = 0;
  1798. (*dd_idx)++;
  1799. break;
  1800. case ALGORITHM_PARITY_N:
  1801. pd_idx = data_disks;
  1802. break;
  1803. default:
  1804. BUG();
  1805. }
  1806. break;
  1807. case 6:
  1808. switch (algorithm) {
  1809. case ALGORITHM_LEFT_ASYMMETRIC:
  1810. pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
  1811. qd_idx = pd_idx + 1;
  1812. if (pd_idx == raid_disks-1) {
  1813. (*dd_idx)++; /* Q D D D P */
  1814. qd_idx = 0;
  1815. } else if (*dd_idx >= pd_idx)
  1816. (*dd_idx) += 2; /* D D P Q D */
  1817. break;
  1818. case ALGORITHM_RIGHT_ASYMMETRIC:
  1819. pd_idx = sector_div(stripe2, raid_disks);
  1820. qd_idx = pd_idx + 1;
  1821. if (pd_idx == raid_disks-1) {
  1822. (*dd_idx)++; /* Q D D D P */
  1823. qd_idx = 0;
  1824. } else if (*dd_idx >= pd_idx)
  1825. (*dd_idx) += 2; /* D D P Q D */
  1826. break;
  1827. case ALGORITHM_LEFT_SYMMETRIC:
  1828. pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
  1829. qd_idx = (pd_idx + 1) % raid_disks;
  1830. *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
  1831. break;
  1832. case ALGORITHM_RIGHT_SYMMETRIC:
  1833. pd_idx = sector_div(stripe2, raid_disks);
  1834. qd_idx = (pd_idx + 1) % raid_disks;
  1835. *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
  1836. break;
  1837. case ALGORITHM_PARITY_0:
  1838. pd_idx = 0;
  1839. qd_idx = 1;
  1840. (*dd_idx) += 2;
  1841. break;
  1842. case ALGORITHM_PARITY_N:
  1843. pd_idx = data_disks;
  1844. qd_idx = data_disks + 1;
  1845. break;
  1846. case ALGORITHM_ROTATING_ZERO_RESTART:
1847. /* Exactly the same as RIGHT_ASYMMETRIC, but the
1848. * order of blocks for computing Q is different.
  1849. */
  1850. pd_idx = sector_div(stripe2, raid_disks);
  1851. qd_idx = pd_idx + 1;
  1852. if (pd_idx == raid_disks-1) {
  1853. (*dd_idx)++; /* Q D D D P */
  1854. qd_idx = 0;
  1855. } else if (*dd_idx >= pd_idx)
  1856. (*dd_idx) += 2; /* D D P Q D */
  1857. ddf_layout = 1;
  1858. break;
  1859. case ALGORITHM_ROTATING_N_RESTART:
1860. /* Same as left_asymmetric, but the first stripe is
  1861. * D D D P Q rather than
  1862. * Q D D D P
  1863. */
  1864. stripe2 += 1;
  1865. pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
  1866. qd_idx = pd_idx + 1;
  1867. if (pd_idx == raid_disks-1) {
  1868. (*dd_idx)++; /* Q D D D P */
  1869. qd_idx = 0;
  1870. } else if (*dd_idx >= pd_idx)
  1871. (*dd_idx) += 2; /* D D P Q D */
  1872. ddf_layout = 1;
  1873. break;
  1874. case ALGORITHM_ROTATING_N_CONTINUE:
  1875. /* Same as left_symmetric but Q is before P */
  1876. pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
  1877. qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
  1878. *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
  1879. ddf_layout = 1;
  1880. break;
  1881. case ALGORITHM_LEFT_ASYMMETRIC_6:
  1882. /* RAID5 left_asymmetric, with Q on last device */
  1883. pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
  1884. if (*dd_idx >= pd_idx)
  1885. (*dd_idx)++;
  1886. qd_idx = raid_disks - 1;
  1887. break;
  1888. case ALGORITHM_RIGHT_ASYMMETRIC_6:
  1889. pd_idx = sector_div(stripe2, raid_disks-1);
  1890. if (*dd_idx >= pd_idx)
  1891. (*dd_idx)++;
  1892. qd_idx = raid_disks - 1;
  1893. break;
  1894. case ALGORITHM_LEFT_SYMMETRIC_6:
  1895. pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
  1896. *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
  1897. qd_idx = raid_disks - 1;
  1898. break;
  1899. case ALGORITHM_RIGHT_SYMMETRIC_6:
  1900. pd_idx = sector_div(stripe2, raid_disks-1);
  1901. *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
  1902. qd_idx = raid_disks - 1;
  1903. break;
  1904. case ALGORITHM_PARITY_0_6:
  1905. pd_idx = 0;
  1906. (*dd_idx)++;
  1907. qd_idx = raid_disks - 1;
  1908. break;
  1909. default:
  1910. BUG();
  1911. }
  1912. break;
  1913. }
  1914. if (sh) {
  1915. sh->pd_idx = pd_idx;
  1916. sh->qd_idx = qd_idx;
  1917. sh->ddf_layout = ddf_layout;
  1918. }
  1919. /*
  1920. * Finally, compute the new sector number
  1921. */
  1922. new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
  1923. return new_sector;
  1924. }
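/*
 * Editor's worked example of the mapping above (illustrative numbers only):
 * a 5-disk RAID5 using ALGORITHM_LEFT_SYMMETRIC with sectors_per_chunk = 64,
 * so data_disks = 4.  For r_sector = 1000:
 *
 *     chunk_offset = 1000 % 64 = 40,  chunk_number = 15
 *     dd_idx       = 15 % 4 = 3,      stripe = 15 / 4 = 3
 *     pd_idx       = 4 - (3 % 5) = 1
 *     dd_idx       = (1 + 1 + 3) % 5 = 0
 *     new_sector   = 3 * 64 + 40 = 232
 *
 * i.e. array sector 1000 lives on device 0 at device sector 232, and the
 * parity block for that stripe sits on device 1.
 */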
  1925. static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
  1926. {
  1927. struct r5conf *conf = sh->raid_conf;
  1928. int raid_disks = sh->disks;
  1929. int data_disks = raid_disks - conf->max_degraded;
  1930. sector_t new_sector = sh->sector, check;
  1931. int sectors_per_chunk = previous ? conf->prev_chunk_sectors
  1932. : conf->chunk_sectors;
  1933. int algorithm = previous ? conf->prev_algo
  1934. : conf->algorithm;
  1935. sector_t stripe;
  1936. int chunk_offset;
  1937. sector_t chunk_number;
  1938. int dummy1, dd_idx = i;
  1939. sector_t r_sector;
  1940. struct stripe_head sh2;
  1941. chunk_offset = sector_div(new_sector, sectors_per_chunk);
  1942. stripe = new_sector;
  1943. if (i == sh->pd_idx)
  1944. return 0;
  1945. switch(conf->level) {
  1946. case 4: break;
  1947. case 5:
  1948. switch (algorithm) {
  1949. case ALGORITHM_LEFT_ASYMMETRIC:
  1950. case ALGORITHM_RIGHT_ASYMMETRIC:
  1951. if (i > sh->pd_idx)
  1952. i--;
  1953. break;
  1954. case ALGORITHM_LEFT_SYMMETRIC:
  1955. case ALGORITHM_RIGHT_SYMMETRIC:
  1956. if (i < sh->pd_idx)
  1957. i += raid_disks;
  1958. i -= (sh->pd_idx + 1);
  1959. break;
  1960. case ALGORITHM_PARITY_0:
  1961. i -= 1;
  1962. break;
  1963. case ALGORITHM_PARITY_N:
  1964. break;
  1965. default:
  1966. BUG();
  1967. }
  1968. break;
  1969. case 6:
  1970. if (i == sh->qd_idx)
  1971. return 0; /* It is the Q disk */
  1972. switch (algorithm) {
  1973. case ALGORITHM_LEFT_ASYMMETRIC:
  1974. case ALGORITHM_RIGHT_ASYMMETRIC:
  1975. case ALGORITHM_ROTATING_ZERO_RESTART:
  1976. case ALGORITHM_ROTATING_N_RESTART:
  1977. if (sh->pd_idx == raid_disks-1)
  1978. i--; /* Q D D D P */
  1979. else if (i > sh->pd_idx)
  1980. i -= 2; /* D D P Q D */
  1981. break;
  1982. case ALGORITHM_LEFT_SYMMETRIC:
  1983. case ALGORITHM_RIGHT_SYMMETRIC:
  1984. if (sh->pd_idx == raid_disks-1)
  1985. i--; /* Q D D D P */
  1986. else {
  1987. /* D D P Q D */
  1988. if (i < sh->pd_idx)
  1989. i += raid_disks;
  1990. i -= (sh->pd_idx + 2);
  1991. }
  1992. break;
  1993. case ALGORITHM_PARITY_0:
  1994. i -= 2;
  1995. break;
  1996. case ALGORITHM_PARITY_N:
  1997. break;
  1998. case ALGORITHM_ROTATING_N_CONTINUE:
  1999. /* Like left_symmetric, but P is before Q */
  2000. if (sh->pd_idx == 0)
  2001. i--; /* P D D D Q */
  2002. else {
  2003. /* D D Q P D */
  2004. if (i < sh->pd_idx)
  2005. i += raid_disks;
  2006. i -= (sh->pd_idx + 1);
  2007. }
  2008. break;
  2009. case ALGORITHM_LEFT_ASYMMETRIC_6:
  2010. case ALGORITHM_RIGHT_ASYMMETRIC_6:
  2011. if (i > sh->pd_idx)
  2012. i--;
  2013. break;
  2014. case ALGORITHM_LEFT_SYMMETRIC_6:
  2015. case ALGORITHM_RIGHT_SYMMETRIC_6:
  2016. if (i < sh->pd_idx)
  2017. i += data_disks + 1;
  2018. i -= (sh->pd_idx + 1);
  2019. break;
  2020. case ALGORITHM_PARITY_0_6:
  2021. i -= 1;
  2022. break;
  2023. default:
  2024. BUG();
  2025. }
  2026. break;
  2027. }
  2028. chunk_number = stripe * data_disks + i;
  2029. r_sector = chunk_number * sectors_per_chunk + chunk_offset;
  2030. check = raid5_compute_sector(conf, r_sector,
  2031. previous, &dummy1, &sh2);
  2032. if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
  2033. || sh2.qd_idx != sh->qd_idx) {
  2034. printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
  2035. mdname(conf->mddev));
  2036. return 0;
  2037. }
  2038. return r_sector;
  2039. }
  2040. static void
  2041. schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
  2042. int rcw, int expand)
  2043. {
  2044. int i, pd_idx = sh->pd_idx, disks = sh->disks;
  2045. struct r5conf *conf = sh->raid_conf;
  2046. int level = conf->level;
  2047. if (rcw) {
  2048. /* if we are not expanding this is a proper write request, and
  2049. * there will be bios with new data to be drained into the
  2050. * stripe cache
  2051. */
  2052. if (!expand) {
  2053. sh->reconstruct_state = reconstruct_state_drain_run;
  2054. set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
  2055. } else
  2056. sh->reconstruct_state = reconstruct_state_run;
  2057. set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
  2058. for (i = disks; i--; ) {
  2059. struct r5dev *dev = &sh->dev[i];
  2060. if (dev->towrite) {
  2061. set_bit(R5_LOCKED, &dev->flags);
  2062. set_bit(R5_Wantdrain, &dev->flags);
  2063. if (!expand)
  2064. clear_bit(R5_UPTODATE, &dev->flags);
  2065. s->locked++;
  2066. }
  2067. }
  2068. if (s->locked + conf->max_degraded == disks)
  2069. if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
  2070. atomic_inc(&conf->pending_full_writes);
  2071. } else {
  2072. BUG_ON(level == 6);
  2073. BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
  2074. test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
  2075. sh->reconstruct_state = reconstruct_state_prexor_drain_run;
  2076. set_bit(STRIPE_OP_PREXOR, &s->ops_request);
  2077. set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
  2078. set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
  2079. for (i = disks; i--; ) {
  2080. struct r5dev *dev = &sh->dev[i];
  2081. if (i == pd_idx)
  2082. continue;
  2083. if (dev->towrite &&
  2084. (test_bit(R5_UPTODATE, &dev->flags) ||
  2085. test_bit(R5_Wantcompute, &dev->flags))) {
  2086. set_bit(R5_Wantdrain, &dev->flags);
  2087. set_bit(R5_LOCKED, &dev->flags);
  2088. clear_bit(R5_UPTODATE, &dev->flags);
  2089. s->locked++;
  2090. }
  2091. }
  2092. }
  2093. /* keep the parity disk(s) locked while asynchronous operations
  2094. * are in flight
  2095. */
  2096. set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
  2097. clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
  2098. s->locked++;
  2099. if (level == 6) {
  2100. int qd_idx = sh->qd_idx;
  2101. struct r5dev *dev = &sh->dev[qd_idx];
  2102. set_bit(R5_LOCKED, &dev->flags);
  2103. clear_bit(R5_UPTODATE, &dev->flags);
  2104. s->locked++;
  2105. }
  2106. pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
  2107. __func__, (unsigned long long)sh->sector,
  2108. s->locked, s->ops_request);
  2109. }
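/*
 * Editor's summary of the two paths above, derived from the code itself:
 *   rcw (reconstruct-write): STRIPE_OP_BIODRAIN (unless expanding) plus
 *     STRIPE_OP_RECONSTRUCT, entering reconstruct_state_drain_run or
 *     reconstruct_state_run;
 *   rmw (read-modify-write, RAID5 only): STRIPE_OP_PREXOR +
 *     STRIPE_OP_BIODRAIN + STRIPE_OP_RECONSTRUCT, entering
 *     reconstruct_state_prexor_drain_run.
 * In both cases the parity block(s) remain R5_LOCKED until
 * ops_complete_reconstruct() runs.
 */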
  2110. /*
2111. * Each stripe/dev can have one or more bios attached.
  2112. * toread/towrite point to the first in a chain.
  2113. * The bi_next chain must be in order.
  2114. */
  2115. static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
  2116. {
  2117. struct bio **bip;
  2118. struct r5conf *conf = sh->raid_conf;
  2119. int firstwrite=0;
  2120. pr_debug("adding bi b#%llu to stripe s#%llu\n",
  2121. (unsigned long long)bi->bi_sector,
  2122. (unsigned long long)sh->sector);
  2123. /*
2124. * If several bios share a stripe, the bio bi_phys_segments field acts as a
2125. * reference count to avoid races. The reference count should already be
2126. * increased before this function is called (for example, in
2127. * make_request()), so other bios sharing this stripe will not free the
2128. * stripe. If a stripe is owned by a single bio, the stripe lock will
2129. * protect it.
  2130. */
  2131. spin_lock_irq(&sh->stripe_lock);
  2132. if (forwrite) {
  2133. bip = &sh->dev[dd_idx].towrite;
  2134. if (*bip == NULL)
  2135. firstwrite = 1;
  2136. } else
  2137. bip = &sh->dev[dd_idx].toread;
  2138. while (*bip && (*bip)->bi_sector < bi->bi_sector) {
  2139. if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
  2140. goto overlap;
  2141. bip = & (*bip)->bi_next;
  2142. }
  2143. if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
  2144. goto overlap;
  2145. BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
  2146. if (*bip)
  2147. bi->bi_next = *bip;
  2148. *bip = bi;
  2149. raid5_inc_bi_active_stripes(bi);
  2150. if (forwrite) {
  2151. /* check if page is covered */
  2152. sector_t sector = sh->dev[dd_idx].sector;
  2153. for (bi=sh->dev[dd_idx].towrite;
  2154. sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
  2155. bi && bi->bi_sector <= sector;
  2156. bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
  2157. if (bi->bi_sector + (bi->bi_size>>9) >= sector)
  2158. sector = bi->bi_sector + (bi->bi_size>>9);
  2159. }
  2160. if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
  2161. set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
  2162. }
  2163. pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
  2164. (unsigned long long)(*bip)->bi_sector,
  2165. (unsigned long long)sh->sector, dd_idx);
  2166. spin_unlock_irq(&sh->stripe_lock);
  2167. if (conf->mddev->bitmap && firstwrite) {
  2168. bitmap_startwrite(conf->mddev->bitmap, sh->sector,
  2169. STRIPE_SECTORS, 0);
  2170. sh->bm_seq = conf->seq_flush+1;
  2171. set_bit(STRIPE_BIT_DELAY, &sh->state);
  2172. }
  2173. return 1;
  2174. overlap:
  2175. set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
  2176. spin_unlock_irq(&sh->stripe_lock);
  2177. return 0;
  2178. }
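/*
 * Editor's sketch (not driver code): the overlap test applied while walking
 * the sorted bi_next chain above, written out for two arbitrary bios.  Two
 * requests overlap when each starts before the other ends, a bio's end
 * sector being bi_sector + (bi_size >> 9).
 */
static int example_bios_overlap(struct bio *a, struct bio *b)
{
	sector_t a_end = a->bi_sector + (a->bi_size >> 9);
	sector_t b_end = b->bi_sector + (b->bi_size >> 9);

	return a->bi_sector < b_end && b->bi_sector < a_end;
}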
  2179. static void end_reshape(struct r5conf *conf);
  2180. static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
  2181. struct stripe_head *sh)
  2182. {
  2183. int sectors_per_chunk =
  2184. previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
  2185. int dd_idx;
  2186. int chunk_offset = sector_div(stripe, sectors_per_chunk);
  2187. int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
  2188. raid5_compute_sector(conf,
  2189. stripe * (disks - conf->max_degraded)
  2190. *sectors_per_chunk + chunk_offset,
  2191. previous,
  2192. &dd_idx, sh);
  2193. }
  2194. static void
  2195. handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
  2196. struct stripe_head_state *s, int disks,
  2197. struct bio **return_bi)
  2198. {
  2199. int i;
  2200. for (i = disks; i--; ) {
  2201. struct bio *bi;
  2202. int bitmap_end = 0;
  2203. if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
  2204. struct md_rdev *rdev;
  2205. rcu_read_lock();
  2206. rdev = rcu_dereference(conf->disks[i].rdev);
  2207. if (rdev && test_bit(In_sync, &rdev->flags))
  2208. atomic_inc(&rdev->nr_pending);
  2209. else
  2210. rdev = NULL;
  2211. rcu_read_unlock();
  2212. if (rdev) {
  2213. if (!rdev_set_badblocks(
  2214. rdev,
  2215. sh->sector,
  2216. STRIPE_SECTORS, 0))
  2217. md_error(conf->mddev, rdev);
  2218. rdev_dec_pending(rdev, conf->mddev);
  2219. }
  2220. }
  2221. spin_lock_irq(&sh->stripe_lock);
  2222. /* fail all writes first */
  2223. bi = sh->dev[i].towrite;
  2224. sh->dev[i].towrite = NULL;
  2225. spin_unlock_irq(&sh->stripe_lock);
  2226. if (bi)
  2227. bitmap_end = 1;
  2228. if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
  2229. wake_up(&conf->wait_for_overlap);
  2230. while (bi && bi->bi_sector <
  2231. sh->dev[i].sector + STRIPE_SECTORS) {
  2232. struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
  2233. clear_bit(BIO_UPTODATE, &bi->bi_flags);
  2234. if (!raid5_dec_bi_active_stripes(bi)) {
  2235. md_write_end(conf->mddev);
  2236. bi->bi_next = *return_bi;
  2237. *return_bi = bi;
  2238. }
  2239. bi = nextbi;
  2240. }
  2241. if (bitmap_end)
  2242. bitmap_endwrite(conf->mddev->bitmap, sh->sector,
  2243. STRIPE_SECTORS, 0, 0);
  2244. bitmap_end = 0;
  2245. /* and fail all 'written' */
  2246. bi = sh->dev[i].written;
  2247. sh->dev[i].written = NULL;
  2248. if (bi) bitmap_end = 1;
  2249. while (bi && bi->bi_sector <
  2250. sh->dev[i].sector + STRIPE_SECTORS) {
  2251. struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
  2252. clear_bit(BIO_UPTODATE, &bi->bi_flags);
  2253. if (!raid5_dec_bi_active_stripes(bi)) {
  2254. md_write_end(conf->mddev);
  2255. bi->bi_next = *return_bi;
  2256. *return_bi = bi;
  2257. }
  2258. bi = bi2;
  2259. }
  2260. /* fail any reads if this device is non-operational and
  2261. * the data has not reached the cache yet.
  2262. */
  2263. if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
  2264. (!test_bit(R5_Insync, &sh->dev[i].flags) ||
  2265. test_bit(R5_ReadError, &sh->dev[i].flags))) {
  2266. spin_lock_irq(&sh->stripe_lock);
  2267. bi = sh->dev[i].toread;
  2268. sh->dev[i].toread = NULL;
  2269. spin_unlock_irq(&sh->stripe_lock);
  2270. if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
  2271. wake_up(&conf->wait_for_overlap);
  2272. while (bi && bi->bi_sector <
  2273. sh->dev[i].sector + STRIPE_SECTORS) {
  2274. struct bio *nextbi =
  2275. r5_next_bio(bi, sh->dev[i].sector);
  2276. clear_bit(BIO_UPTODATE, &bi->bi_flags);
  2277. if (!raid5_dec_bi_active_stripes(bi)) {
  2278. bi->bi_next = *return_bi;
  2279. *return_bi = bi;
  2280. }
  2281. bi = nextbi;
  2282. }
  2283. }
  2284. if (bitmap_end)
  2285. bitmap_endwrite(conf->mddev->bitmap, sh->sector,
  2286. STRIPE_SECTORS, 0, 0);
  2287. /* If we were in the middle of a write the parity block might
  2288. * still be locked - so just clear all R5_LOCKED flags
  2289. */
  2290. clear_bit(R5_LOCKED, &sh->dev[i].flags);
  2291. }
  2292. if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
  2293. if (atomic_dec_and_test(&conf->pending_full_writes))
  2294. md_wakeup_thread(conf->mddev->thread);
  2295. }
  2296. static void
  2297. handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
  2298. struct stripe_head_state *s)
  2299. {
  2300. int abort = 0;
  2301. int i;
  2302. clear_bit(STRIPE_SYNCING, &sh->state);
  2303. s->syncing = 0;
  2304. s->replacing = 0;
  2305. /* There is nothing more to do for sync/check/repair.
  2306. * Don't even need to abort as that is handled elsewhere
  2307. * if needed, and not always wanted e.g. if there is a known
  2308. * bad block here.
  2309. * For recover/replace we need to record a bad block on all
  2310. * non-sync devices, or abort the recovery
  2311. */
  2312. if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
  2313. /* During recovery devices cannot be removed, so
  2314. * locking and refcounting of rdevs is not needed
  2315. */
  2316. for (i = 0; i < conf->raid_disks; i++) {
  2317. struct md_rdev *rdev = conf->disks[i].rdev;
  2318. if (rdev
  2319. && !test_bit(Faulty, &rdev->flags)
  2320. && !test_bit(In_sync, &rdev->flags)
  2321. && !rdev_set_badblocks(rdev, sh->sector,
  2322. STRIPE_SECTORS, 0))
  2323. abort = 1;
  2324. rdev = conf->disks[i].replacement;
  2325. if (rdev
  2326. && !test_bit(Faulty, &rdev->flags)
  2327. && !test_bit(In_sync, &rdev->flags)
  2328. && !rdev_set_badblocks(rdev, sh->sector,
  2329. STRIPE_SECTORS, 0))
  2330. abort = 1;
  2331. }
  2332. if (abort)
  2333. conf->recovery_disabled =
  2334. conf->mddev->recovery_disabled;
  2335. }
  2336. md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
  2337. }
  2338. static int want_replace(struct stripe_head *sh, int disk_idx)
  2339. {
  2340. struct md_rdev *rdev;
  2341. int rv = 0;
  2342. /* Doing recovery so rcu locking not required */
  2343. rdev = sh->raid_conf->disks[disk_idx].replacement;
  2344. if (rdev
  2345. && !test_bit(Faulty, &rdev->flags)
  2346. && !test_bit(In_sync, &rdev->flags)
  2347. && (rdev->recovery_offset <= sh->sector
  2348. || rdev->mddev->recovery_cp <= sh->sector))
  2349. rv = 1;
  2350. return rv;
  2351. }
  2352. /* fetch_block - checks the given member device to see if its data needs
  2353. * to be read or computed to satisfy a request.
  2354. *
  2355. * Returns 1 when no more member devices need to be checked, otherwise returns
  2356. * 0 to tell the loop in handle_stripe_fill to continue
  2357. */
  2358. static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
  2359. int disk_idx, int disks)
  2360. {
  2361. struct r5dev *dev = &sh->dev[disk_idx];
  2362. struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
  2363. &sh->dev[s->failed_num[1]] };
  2364. /* is the data in this block needed, and can we get it? */
  2365. if (!test_bit(R5_LOCKED, &dev->flags) &&
  2366. !test_bit(R5_UPTODATE, &dev->flags) &&
  2367. (dev->toread ||
  2368. (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
  2369. s->syncing || s->expanding ||
  2370. (s->replacing && want_replace(sh, disk_idx)) ||
  2371. (s->failed >= 1 && fdev[0]->toread) ||
  2372. (s->failed >= 2 && fdev[1]->toread) ||
  2373. (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
  2374. !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
  2375. (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
  2376. /* we would like to get this block, possibly by computing it,
  2377. * otherwise read it if the backing disk is insync
  2378. */
  2379. BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
  2380. BUG_ON(test_bit(R5_Wantread, &dev->flags));
  2381. if ((s->uptodate == disks - 1) &&
  2382. (s->failed && (disk_idx == s->failed_num[0] ||
  2383. disk_idx == s->failed_num[1]))) {
2384. /* a disk has failed and we're requested to fetch its block;
2385. * so compute it
  2386. */
  2387. pr_debug("Computing stripe %llu block %d\n",
  2388. (unsigned long long)sh->sector, disk_idx);
  2389. set_bit(STRIPE_COMPUTE_RUN, &sh->state);
  2390. set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
  2391. set_bit(R5_Wantcompute, &dev->flags);
  2392. sh->ops.target = disk_idx;
  2393. sh->ops.target2 = -1; /* no 2nd target */
  2394. s->req_compute = 1;
  2395. /* Careful: from this point on 'uptodate' is in the eye
  2396. * of raid_run_ops which services 'compute' operations
  2397. * before writes. R5_Wantcompute flags a block that will
  2398. * be R5_UPTODATE by the time it is needed for a
  2399. * subsequent operation.
  2400. */
  2401. s->uptodate++;
  2402. return 1;
  2403. } else if (s->uptodate == disks-2 && s->failed >= 2) {
  2404. /* Computing 2-failure is *very* expensive; only
  2405. * do it if failed >= 2
  2406. */
  2407. int other;
  2408. for (other = disks; other--; ) {
  2409. if (other == disk_idx)
  2410. continue;
  2411. if (!test_bit(R5_UPTODATE,
  2412. &sh->dev[other].flags))
  2413. break;
  2414. }
  2415. BUG_ON(other < 0);
  2416. pr_debug("Computing stripe %llu blocks %d,%d\n",
  2417. (unsigned long long)sh->sector,
  2418. disk_idx, other);
  2419. set_bit(STRIPE_COMPUTE_RUN, &sh->state);
  2420. set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
  2421. set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
  2422. set_bit(R5_Wantcompute, &sh->dev[other].flags);
  2423. sh->ops.target = disk_idx;
  2424. sh->ops.target2 = other;
  2425. s->uptodate += 2;
  2426. s->req_compute = 1;
  2427. return 1;
  2428. } else if (test_bit(R5_Insync, &dev->flags)) {
  2429. set_bit(R5_LOCKED, &dev->flags);
  2430. set_bit(R5_Wantread, &dev->flags);
  2431. s->locked++;
  2432. pr_debug("Reading block %d (sync=%d)\n",
  2433. disk_idx, s->syncing);
  2434. }
  2435. }
  2436. return 0;
  2437. }
  2438. /**
  2439. * handle_stripe_fill - read or compute data to satisfy pending requests.
  2440. */
  2441. static void handle_stripe_fill(struct stripe_head *sh,
  2442. struct stripe_head_state *s,
  2443. int disks)
  2444. {
  2445. int i;
  2446. /* look for blocks to read/compute, skip this if a compute
  2447. * is already in flight, or if the stripe contents are in the
  2448. * midst of changing due to a write
  2449. */
  2450. if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
  2451. !sh->reconstruct_state)
  2452. for (i = disks; i--; )
  2453. if (fetch_block(sh, s, i, disks))
  2454. break;
  2455. set_bit(STRIPE_HANDLE, &sh->state);
  2456. }
  2457. /* handle_stripe_clean_event
  2458. * any written block on an uptodate or failed drive can be returned.
  2459. * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
  2460. * never LOCKED, so we don't need to test 'failed' directly.
  2461. */
  2462. static void handle_stripe_clean_event(struct r5conf *conf,
  2463. struct stripe_head *sh, int disks, struct bio **return_bi)
  2464. {
  2465. int i;
  2466. struct r5dev *dev;
  2467. for (i = disks; i--; )
  2468. if (sh->dev[i].written) {
  2469. dev = &sh->dev[i];
  2470. if (!test_bit(R5_LOCKED, &dev->flags) &&
  2471. (test_bit(R5_UPTODATE, &dev->flags) ||
  2472. test_bit(R5_Discard, &dev->flags))) {
  2473. /* We can return any write requests */
  2474. struct bio *wbi, *wbi2;
  2475. pr_debug("Return write for disc %d\n", i);
  2476. if (test_and_clear_bit(R5_Discard, &dev->flags))
  2477. clear_bit(R5_UPTODATE, &dev->flags);
  2478. wbi = dev->written;
  2479. dev->written = NULL;
  2480. while (wbi && wbi->bi_sector <
  2481. dev->sector + STRIPE_SECTORS) {
  2482. wbi2 = r5_next_bio(wbi, dev->sector);
  2483. if (!raid5_dec_bi_active_stripes(wbi)) {
  2484. md_write_end(conf->mddev);
  2485. wbi->bi_next = *return_bi;
  2486. *return_bi = wbi;
  2487. }
  2488. wbi = wbi2;
  2489. }
  2490. bitmap_endwrite(conf->mddev->bitmap, sh->sector,
  2491. STRIPE_SECTORS,
  2492. !test_bit(STRIPE_DEGRADED, &sh->state),
  2493. 0);
  2494. }
  2495. } else if (test_bit(R5_Discard, &sh->dev[i].flags))
  2496. clear_bit(R5_Discard, &sh->dev[i].flags);
  2497. if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
  2498. if (atomic_dec_and_test(&conf->pending_full_writes))
  2499. md_wakeup_thread(conf->mddev->thread);
  2500. }
  2501. static void handle_stripe_dirtying(struct r5conf *conf,
  2502. struct stripe_head *sh,
  2503. struct stripe_head_state *s,
  2504. int disks)
  2505. {
  2506. int rmw = 0, rcw = 0, i;
  2507. sector_t recovery_cp = conf->mddev->recovery_cp;
  2508. /* RAID6 requires 'rcw' in current implementation.
  2509. * Otherwise, check whether resync is now happening or should start.
  2510. * If yes, then the array is dirty (after unclean shutdown or
  2511. * initial creation), so parity in some stripes might be inconsistent.
  2512. * In this case, we need to always do reconstruct-write, to ensure
  2513. * that in case of drive failure or read-error correction, we
  2514. * generate correct data from the parity.
  2515. */
  2516. if (conf->max_degraded == 2 ||
  2517. (recovery_cp < MaxSector && sh->sector >= recovery_cp)) {
  2518. /* Calculate the real rcw later - for now make it
  2519. * look like rcw is cheaper
  2520. */
  2521. rcw = 1; rmw = 2;
  2522. pr_debug("force RCW max_degraded=%u, recovery_cp=%llu sh->sector=%llu\n",
  2523. conf->max_degraded, (unsigned long long)recovery_cp,
  2524. (unsigned long long)sh->sector);
  2525. } else for (i = disks; i--; ) {
  2526. /* would I have to read this buffer for read_modify_write */
  2527. struct r5dev *dev = &sh->dev[i];
  2528. if ((dev->towrite || i == sh->pd_idx) &&
  2529. !test_bit(R5_LOCKED, &dev->flags) &&
  2530. !(test_bit(R5_UPTODATE, &dev->flags) ||
  2531. test_bit(R5_Wantcompute, &dev->flags))) {
  2532. if (test_bit(R5_Insync, &dev->flags))
  2533. rmw++;
  2534. else
  2535. rmw += 2*disks; /* cannot read it */
  2536. }
  2537. /* Would I have to read this buffer for reconstruct_write */
  2538. if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
  2539. !test_bit(R5_LOCKED, &dev->flags) &&
  2540. !(test_bit(R5_UPTODATE, &dev->flags) ||
  2541. test_bit(R5_Wantcompute, &dev->flags))) {
  2542. if (test_bit(R5_Insync, &dev->flags)) rcw++;
  2543. else
  2544. rcw += 2*disks;
  2545. }
  2546. }
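/* As a rough example (RAID5, 4 devices, one data block partially
 * rewritten): rmw needs the old copy of that block plus the old
 * parity (2 reads), while rcw needs every data block that is not
 * fully overwritten (3 reads), so read-modify-write wins; a full
 * stripe overwrite leaves rcw == 0 and needs no pre-reads at all.
 */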
  2547. pr_debug("for sector %llu, rmw=%d rcw=%d\n",
  2548. (unsigned long long)sh->sector, rmw, rcw);
  2549. set_bit(STRIPE_HANDLE, &sh->state);
  2550. if (rmw < rcw && rmw > 0)
  2551. /* prefer read-modify-write, but need to get some data */
  2552. for (i = disks; i--; ) {
  2553. struct r5dev *dev = &sh->dev[i];
  2554. if ((dev->towrite || i == sh->pd_idx) &&
  2555. !test_bit(R5_LOCKED, &dev->flags) &&
  2556. !(test_bit(R5_UPTODATE, &dev->flags) ||
  2557. test_bit(R5_Wantcompute, &dev->flags)) &&
  2558. test_bit(R5_Insync, &dev->flags)) {
2559. if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
  2561. pr_debug("Read_old block "
  2562. "%d for r-m-w\n", i);
  2563. set_bit(R5_LOCKED, &dev->flags);
  2564. set_bit(R5_Wantread, &dev->flags);
  2565. s->locked++;
  2566. } else {
  2567. set_bit(STRIPE_DELAYED, &sh->state);
  2568. set_bit(STRIPE_HANDLE, &sh->state);
  2569. }
  2570. }
  2571. }
  2572. if (rcw <= rmw && rcw > 0) {
  2573. /* want reconstruct write, but need to get some data */
  2574. rcw = 0;
  2575. for (i = disks; i--; ) {
  2576. struct r5dev *dev = &sh->dev[i];
  2577. if (!test_bit(R5_OVERWRITE, &dev->flags) &&
  2578. i != sh->pd_idx && i != sh->qd_idx &&
  2579. !test_bit(R5_LOCKED, &dev->flags) &&
  2580. !(test_bit(R5_UPTODATE, &dev->flags) ||
  2581. test_bit(R5_Wantcompute, &dev->flags))) {
  2582. rcw++;
  2583. if (!test_bit(R5_Insync, &dev->flags))
  2584. continue; /* it's a failed drive */
2585. if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
  2587. pr_debug("Read_old block "
  2588. "%d for Reconstruct\n", i);
  2589. set_bit(R5_LOCKED, &dev->flags);
  2590. set_bit(R5_Wantread, &dev->flags);
  2591. s->locked++;
  2592. } else {
  2593. set_bit(STRIPE_DELAYED, &sh->state);
  2594. set_bit(STRIPE_HANDLE, &sh->state);
  2595. }
  2596. }
  2597. }
  2598. }
  2599. /* now if nothing is locked, and if we have enough data,
  2600. * we can start a write request
  2601. */
  2602. /* since handle_stripe can be called at any time we need to handle the
  2603. * case where a compute block operation has been submitted and then a
  2604. * subsequent call wants to start a write request. raid_run_ops only
  2605. * handles the case where compute block and reconstruct are requested
  2606. * simultaneously. If this is not the case then new writes need to be
  2607. * held off until the compute completes.
  2608. */
  2609. if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
  2610. (s->locked == 0 && (rcw == 0 || rmw == 0) &&
  2611. !test_bit(STRIPE_BIT_DELAY, &sh->state)))
  2612. schedule_reconstruction(sh, s, rcw == 0, 0);
  2613. }
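/* In outline, handle_parity_checks5 drives the RAID5 parity-check
 * state machine: check_state_idle starts a new check (or falls through
 * to the compute_result handling when a device has failed),
 * check_state_run waits for the async check to complete,
 * check_state_check_result either marks the stripe in-sync or
 * schedules a parity recompute, and check_state_compute_run /
 * compute_result write the repaired block back out.
 */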
  2614. static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
  2615. struct stripe_head_state *s, int disks)
  2616. {
  2617. struct r5dev *dev = NULL;
  2618. set_bit(STRIPE_HANDLE, &sh->state);
  2619. switch (sh->check_state) {
  2620. case check_state_idle:
  2621. /* start a new check operation if there are no failures */
  2622. if (s->failed == 0) {
  2623. BUG_ON(s->uptodate != disks);
  2624. sh->check_state = check_state_run;
  2625. set_bit(STRIPE_OP_CHECK, &s->ops_request);
  2626. clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
  2627. s->uptodate--;
  2628. break;
  2629. }
  2630. dev = &sh->dev[s->failed_num[0]];
  2631. /* fall through */
  2632. case check_state_compute_result:
  2633. sh->check_state = check_state_idle;
  2634. if (!dev)
  2635. dev = &sh->dev[sh->pd_idx];
  2636. /* check that a write has not made the stripe insync */
  2637. if (test_bit(STRIPE_INSYNC, &sh->state))
  2638. break;
  2639. /* either failed parity check, or recovery is happening */
  2640. BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
  2641. BUG_ON(s->uptodate != disks);
  2642. set_bit(R5_LOCKED, &dev->flags);
  2643. s->locked++;
  2644. set_bit(R5_Wantwrite, &dev->flags);
  2645. clear_bit(STRIPE_DEGRADED, &sh->state);
  2646. set_bit(STRIPE_INSYNC, &sh->state);
  2647. break;
  2648. case check_state_run:
  2649. break; /* we will be called again upon completion */
  2650. case check_state_check_result:
  2651. sh->check_state = check_state_idle;
  2652. /* if a failure occurred during the check operation, leave
  2653. * STRIPE_INSYNC not set and let the stripe be handled again
  2654. */
  2655. if (s->failed)
  2656. break;
  2657. /* handle a successful check operation, if parity is correct
  2658. * we are done. Otherwise update the mismatch count and repair
  2659. * parity if !MD_RECOVERY_CHECK
  2660. */
  2661. if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
  2662. /* parity is correct (on disc,
  2663. * not in buffer any more)
  2664. */
  2665. set_bit(STRIPE_INSYNC, &sh->state);
  2666. else {
  2667. atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
  2668. if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
  2669. /* don't try to repair!! */
  2670. set_bit(STRIPE_INSYNC, &sh->state);
  2671. else {
  2672. sh->check_state = check_state_compute_run;
  2673. set_bit(STRIPE_COMPUTE_RUN, &sh->state);
  2674. set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
  2675. set_bit(R5_Wantcompute,
  2676. &sh->dev[sh->pd_idx].flags);
  2677. sh->ops.target = sh->pd_idx;
  2678. sh->ops.target2 = -1;
  2679. s->uptodate++;
  2680. }
  2681. }
  2682. break;
  2683. case check_state_compute_run:
  2684. break;
  2685. default:
  2686. printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
  2687. __func__, sh->check_state,
  2688. (unsigned long long) sh->sector);
  2689. BUG();
  2690. }
  2691. }
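/* As above, but for RAID6 P and Q can be checked independently:
 * check_state_run checks P only, check_state_run_q checks Q only, and
 * check_state_run_pq checks both. zero_sum_result then carries the
 * SUM_CHECK_P_RESULT / SUM_CHECK_Q_RESULT bits telling us which (if
 * either) parity block needs to be recomputed.
 */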
  2692. static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
  2693. struct stripe_head_state *s,
  2694. int disks)
  2695. {
  2696. int pd_idx = sh->pd_idx;
  2697. int qd_idx = sh->qd_idx;
  2698. struct r5dev *dev;
  2699. set_bit(STRIPE_HANDLE, &sh->state);
  2700. BUG_ON(s->failed > 2);
  2701. /* Want to check and possibly repair P and Q.
  2702. * However there could be one 'failed' device, in which
  2703. * case we can only check one of them, possibly using the
  2704. * other to generate missing data
  2705. */
  2706. switch (sh->check_state) {
  2707. case check_state_idle:
  2708. /* start a new check operation if there are < 2 failures */
  2709. if (s->failed == s->q_failed) {
  2710. /* The only possible failed device holds Q, so it
  2711. * makes sense to check P (If anything else were failed,
  2712. * we would have used P to recreate it).
  2713. */
  2714. sh->check_state = check_state_run;
  2715. }
  2716. if (!s->q_failed && s->failed < 2) {
  2717. /* Q is not failed, and we didn't use it to generate
  2718. * anything, so it makes sense to check it
  2719. */
  2720. if (sh->check_state == check_state_run)
  2721. sh->check_state = check_state_run_pq;
  2722. else
  2723. sh->check_state = check_state_run_q;
  2724. }
  2725. /* discard potentially stale zero_sum_result */
  2726. sh->ops.zero_sum_result = 0;
  2727. if (sh->check_state == check_state_run) {
  2728. /* async_xor_zero_sum destroys the contents of P */
  2729. clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
  2730. s->uptodate--;
  2731. }
  2732. if (sh->check_state >= check_state_run &&
  2733. sh->check_state <= check_state_run_pq) {
  2734. /* async_syndrome_zero_sum preserves P and Q, so
  2735. * no need to mark them !uptodate here
  2736. */
  2737. set_bit(STRIPE_OP_CHECK, &s->ops_request);
  2738. break;
  2739. }
  2740. /* we have 2-disk failure */
  2741. BUG_ON(s->failed != 2);
  2742. /* fall through */
  2743. case check_state_compute_result:
  2744. sh->check_state = check_state_idle;
  2745. /* check that a write has not made the stripe insync */
  2746. if (test_bit(STRIPE_INSYNC, &sh->state))
  2747. break;
  2748. /* now write out any block on a failed drive,
  2749. * or P or Q if they were recomputed
  2750. */
  2751. BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
  2752. if (s->failed == 2) {
  2753. dev = &sh->dev[s->failed_num[1]];
  2754. s->locked++;
  2755. set_bit(R5_LOCKED, &dev->flags);
  2756. set_bit(R5_Wantwrite, &dev->flags);
  2757. }
  2758. if (s->failed >= 1) {
  2759. dev = &sh->dev[s->failed_num[0]];
  2760. s->locked++;
  2761. set_bit(R5_LOCKED, &dev->flags);
  2762. set_bit(R5_Wantwrite, &dev->flags);
  2763. }
  2764. if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
  2765. dev = &sh->dev[pd_idx];
  2766. s->locked++;
  2767. set_bit(R5_LOCKED, &dev->flags);
  2768. set_bit(R5_Wantwrite, &dev->flags);
  2769. }
  2770. if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
  2771. dev = &sh->dev[qd_idx];
  2772. s->locked++;
  2773. set_bit(R5_LOCKED, &dev->flags);
  2774. set_bit(R5_Wantwrite, &dev->flags);
  2775. }
  2776. clear_bit(STRIPE_DEGRADED, &sh->state);
  2777. set_bit(STRIPE_INSYNC, &sh->state);
  2778. break;
  2779. case check_state_run:
  2780. case check_state_run_q:
  2781. case check_state_run_pq:
  2782. break; /* we will be called again upon completion */
  2783. case check_state_check_result:
  2784. sh->check_state = check_state_idle;
  2785. /* handle a successful check operation, if parity is correct
  2786. * we are done. Otherwise update the mismatch count and repair
  2787. * parity if !MD_RECOVERY_CHECK
  2788. */
  2789. if (sh->ops.zero_sum_result == 0) {
  2790. /* both parities are correct */
  2791. if (!s->failed)
  2792. set_bit(STRIPE_INSYNC, &sh->state);
  2793. else {
  2794. /* in contrast to the raid5 case we can validate
  2795. * parity, but still have a failure to write
  2796. * back
  2797. */
  2798. sh->check_state = check_state_compute_result;
  2799. /* Returning at this point means that we may go
  2800. * off and bring p and/or q uptodate again so
  2801. * we make sure to check zero_sum_result again
  2802. * to verify if p or q need writeback
  2803. */
  2804. }
  2805. } else {
  2806. atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
  2807. if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
  2808. /* don't try to repair!! */
  2809. set_bit(STRIPE_INSYNC, &sh->state);
  2810. else {
  2811. int *target = &sh->ops.target;
  2812. sh->ops.target = -1;
  2813. sh->ops.target2 = -1;
  2814. sh->check_state = check_state_compute_run;
  2815. set_bit(STRIPE_COMPUTE_RUN, &sh->state);
  2816. set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
  2817. if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
  2818. set_bit(R5_Wantcompute,
  2819. &sh->dev[pd_idx].flags);
  2820. *target = pd_idx;
  2821. target = &sh->ops.target2;
  2822. s->uptodate++;
  2823. }
  2824. if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
  2825. set_bit(R5_Wantcompute,
  2826. &sh->dev[qd_idx].flags);
  2827. *target = qd_idx;
  2828. s->uptodate++;
  2829. }
  2830. }
  2831. }
  2832. break;
  2833. case check_state_compute_run:
  2834. break;
  2835. default:
  2836. printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
  2837. __func__, sh->check_state,
  2838. (unsigned long long) sh->sector);
  2839. BUG();
  2840. }
  2841. }
  2842. static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
  2843. {
  2844. int i;
  2845. /* We have read all the blocks in this stripe and now we need to
  2846. * copy some of them into a target stripe for expand.
  2847. */
  2848. struct dma_async_tx_descriptor *tx = NULL;
  2849. clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
  2850. for (i = 0; i < sh->disks; i++)
  2851. if (i != sh->pd_idx && i != sh->qd_idx) {
  2852. int dd_idx, j;
  2853. struct stripe_head *sh2;
  2854. struct async_submit_ctl submit;
  2855. sector_t bn = compute_blocknr(sh, i, 1);
  2856. sector_t s = raid5_compute_sector(conf, bn, 0,
  2857. &dd_idx, NULL);
  2858. sh2 = get_active_stripe(conf, s, 0, 1, 1);
  2859. if (sh2 == NULL)
  2860. /* so far only the early blocks of this stripe
  2861. * have been requested. When later blocks
  2862. * get requested, we will try again
  2863. */
  2864. continue;
  2865. if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
  2866. test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
  2867. /* must have already done this block */
  2868. release_stripe(sh2);
  2869. continue;
  2870. }
  2871. /* place all the copies on one channel */
  2872. init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
  2873. tx = async_memcpy(sh2->dev[dd_idx].page,
  2874. sh->dev[i].page, 0, 0, STRIPE_SIZE,
  2875. &submit);
  2876. set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
  2877. set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
  2878. for (j = 0; j < conf->raid_disks; j++)
  2879. if (j != sh2->pd_idx &&
  2880. j != sh2->qd_idx &&
  2881. !test_bit(R5_Expanded, &sh2->dev[j].flags))
  2882. break;
  2883. if (j == conf->raid_disks) {
  2884. set_bit(STRIPE_EXPAND_READY, &sh2->state);
  2885. set_bit(STRIPE_HANDLE, &sh2->state);
  2886. }
  2887. release_stripe(sh2);
  2888. }
  2889. /* done submitting copies, wait for them to complete */
  2890. if (tx) {
  2891. async_tx_ack(tx);
  2892. dma_wait_for_async_tx(tx);
  2893. }
  2894. }
  2895. /*
  2896. * handle_stripe - do things to a stripe.
  2897. *
  2898. * We lock the stripe by setting STRIPE_ACTIVE and then examine the
  2899. * state of various bits to see what needs to be done.
  2900. * Possible results:
  2901. * return some read requests which now have data
  2902. * return some write requests which are safely on storage
  2903. * schedule a read on some buffers
  2904. * schedule a write of some buffers
  2905. * return confirmation of parity correctness
  2906. *
  2907. */
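/* analyse_stripe - take a consistent snapshot of per-device state.
 * Under rcu_read_lock it counts locked/uptodate/to_read/to_write/
 * failed devices, decides whether the replacement or the original
 * rdev should service reads, records any blocked or bad-block rdevs,
 * and decides whether the stripe is syncing or replacing, so that
 * handle_stripe can act on the summary in 's'.
 */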
  2908. static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
  2909. {
  2910. struct r5conf *conf = sh->raid_conf;
  2911. int disks = sh->disks;
  2912. struct r5dev *dev;
  2913. int i;
  2914. int do_recovery = 0;
  2915. memset(s, 0, sizeof(*s));
  2916. s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
  2917. s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
  2918. s->failed_num[0] = -1;
  2919. s->failed_num[1] = -1;
  2920. /* Now to look around and see what can be done */
  2921. rcu_read_lock();
  2922. for (i=disks; i--; ) {
  2923. struct md_rdev *rdev;
  2924. sector_t first_bad;
  2925. int bad_sectors;
  2926. int is_bad = 0;
  2927. dev = &sh->dev[i];
  2928. pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
  2929. i, dev->flags,
  2930. dev->toread, dev->towrite, dev->written);
  2931. /* maybe we can reply to a read
  2932. *
  2933. * new wantfill requests are only permitted while
  2934. * ops_complete_biofill is guaranteed to be inactive
  2935. */
  2936. if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
  2937. !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
  2938. set_bit(R5_Wantfill, &dev->flags);
  2939. /* now count some things */
  2940. if (test_bit(R5_LOCKED, &dev->flags))
  2941. s->locked++;
  2942. if (test_bit(R5_UPTODATE, &dev->flags))
  2943. s->uptodate++;
  2944. if (test_bit(R5_Wantcompute, &dev->flags)) {
  2945. s->compute++;
  2946. BUG_ON(s->compute > 2);
  2947. }
  2948. if (test_bit(R5_Wantfill, &dev->flags))
  2949. s->to_fill++;
  2950. else if (dev->toread)
  2951. s->to_read++;
  2952. if (dev->towrite) {
  2953. s->to_write++;
  2954. if (!test_bit(R5_OVERWRITE, &dev->flags))
  2955. s->non_overwrite++;
  2956. }
  2957. if (dev->written)
  2958. s->written++;
  2959. /* Prefer to use the replacement for reads, but only
  2960. * if it is recovered enough and has no bad blocks.
  2961. */
  2962. rdev = rcu_dereference(conf->disks[i].replacement);
  2963. if (rdev && !test_bit(Faulty, &rdev->flags) &&
  2964. rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
  2965. !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
  2966. &first_bad, &bad_sectors))
  2967. set_bit(R5_ReadRepl, &dev->flags);
  2968. else {
  2969. if (rdev)
  2970. set_bit(R5_NeedReplace, &dev->flags);
  2971. rdev = rcu_dereference(conf->disks[i].rdev);
  2972. clear_bit(R5_ReadRepl, &dev->flags);
  2973. }
  2974. if (rdev && test_bit(Faulty, &rdev->flags))
  2975. rdev = NULL;
  2976. if (rdev) {
  2977. is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
  2978. &first_bad, &bad_sectors);
  2979. if (s->blocked_rdev == NULL
  2980. && (test_bit(Blocked, &rdev->flags)
  2981. || is_bad < 0)) {
  2982. if (is_bad < 0)
  2983. set_bit(BlockedBadBlocks,
  2984. &rdev->flags);
  2985. s->blocked_rdev = rdev;
  2986. atomic_inc(&rdev->nr_pending);
  2987. }
  2988. }
  2989. clear_bit(R5_Insync, &dev->flags);
  2990. if (!rdev)
  2991. /* Not in-sync */;
  2992. else if (is_bad) {
  2993. /* also not in-sync */
  2994. if (!test_bit(WriteErrorSeen, &rdev->flags) &&
  2995. test_bit(R5_UPTODATE, &dev->flags)) {
  2996. /* treat as in-sync, but with a read error
  2997. * which we can now try to correct
  2998. */
  2999. set_bit(R5_Insync, &dev->flags);
  3000. set_bit(R5_ReadError, &dev->flags);
  3001. }
  3002. } else if (test_bit(In_sync, &rdev->flags))
  3003. set_bit(R5_Insync, &dev->flags);
  3004. else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
  3005. /* in sync if before recovery_offset */
  3006. set_bit(R5_Insync, &dev->flags);
  3007. else if (test_bit(R5_UPTODATE, &dev->flags) &&
  3008. test_bit(R5_Expanded, &dev->flags))
  3009. /* If we've reshaped into here, we assume it is Insync.
  3010. * We will shortly update recovery_offset to make
  3011. * it official.
  3012. */
  3013. set_bit(R5_Insync, &dev->flags);
  3014. if (rdev && test_bit(R5_WriteError, &dev->flags)) {
3015. /* This flag applies only to '.rdev', not to '.replacement',
3016. * so make sure we are looking at '.rdev' */
  3017. struct md_rdev *rdev2 = rcu_dereference(
  3018. conf->disks[i].rdev);
  3019. if (rdev2 == rdev)
  3020. clear_bit(R5_Insync, &dev->flags);
  3021. if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
  3022. s->handle_bad_blocks = 1;
  3023. atomic_inc(&rdev2->nr_pending);
  3024. } else
  3025. clear_bit(R5_WriteError, &dev->flags);
  3026. }
  3027. if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
3028. /* This flag applies only to '.rdev', not to '.replacement',
3029. * so make sure we are looking at '.rdev' */
  3030. struct md_rdev *rdev2 = rcu_dereference(
  3031. conf->disks[i].rdev);
  3032. if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
  3033. s->handle_bad_blocks = 1;
  3034. atomic_inc(&rdev2->nr_pending);
  3035. } else
  3036. clear_bit(R5_MadeGood, &dev->flags);
  3037. }
  3038. if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
  3039. struct md_rdev *rdev2 = rcu_dereference(
  3040. conf->disks[i].replacement);
  3041. if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
  3042. s->handle_bad_blocks = 1;
  3043. atomic_inc(&rdev2->nr_pending);
  3044. } else
  3045. clear_bit(R5_MadeGoodRepl, &dev->flags);
  3046. }
  3047. if (!test_bit(R5_Insync, &dev->flags)) {
  3048. /* The ReadError flag will just be confusing now */
  3049. clear_bit(R5_ReadError, &dev->flags);
  3050. clear_bit(R5_ReWrite, &dev->flags);
  3051. }
  3052. if (test_bit(R5_ReadError, &dev->flags))
  3053. clear_bit(R5_Insync, &dev->flags);
  3054. if (!test_bit(R5_Insync, &dev->flags)) {
  3055. if (s->failed < 2)
  3056. s->failed_num[s->failed] = i;
  3057. s->failed++;
  3058. if (rdev && !test_bit(Faulty, &rdev->flags))
  3059. do_recovery = 1;
  3060. }
  3061. }
  3062. if (test_bit(STRIPE_SYNCING, &sh->state)) {
  3063. /* If there is a failed device being replaced,
  3064. * we must be recovering.
  3065. * else if we are after recovery_cp, we must be syncing
  3066. * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
  3067. * else we can only be replacing
  3068. * sync and recovery both need to read all devices, and so
  3069. * use the same flag.
  3070. */
  3071. if (do_recovery ||
  3072. sh->sector >= conf->mddev->recovery_cp ||
  3073. test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
  3074. s->syncing = 1;
  3075. else
  3076. s->replacing = 1;
  3077. }
  3078. rcu_read_unlock();
  3079. }
  3080. static void handle_stripe(struct stripe_head *sh)
  3081. {
  3082. struct stripe_head_state s;
  3083. struct r5conf *conf = sh->raid_conf;
  3084. int i;
  3085. int prexor;
  3086. int disks = sh->disks;
  3087. struct r5dev *pdev, *qdev;
  3088. clear_bit(STRIPE_HANDLE, &sh->state);
  3089. if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
  3090. /* already being handled, ensure it gets handled
  3091. * again when current action finishes */
  3092. set_bit(STRIPE_HANDLE, &sh->state);
  3093. return;
  3094. }
  3095. if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
  3096. set_bit(STRIPE_SYNCING, &sh->state);
  3097. clear_bit(STRIPE_INSYNC, &sh->state);
  3098. }
  3099. clear_bit(STRIPE_DELAYED, &sh->state);
  3100. pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
  3101. "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
  3102. (unsigned long long)sh->sector, sh->state,
  3103. atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
  3104. sh->check_state, sh->reconstruct_state);
  3105. analyse_stripe(sh, &s);
  3106. if (s.handle_bad_blocks) {
  3107. set_bit(STRIPE_HANDLE, &sh->state);
  3108. goto finish;
  3109. }
  3110. if (unlikely(s.blocked_rdev)) {
  3111. if (s.syncing || s.expanding || s.expanded ||
  3112. s.replacing || s.to_write || s.written) {
  3113. set_bit(STRIPE_HANDLE, &sh->state);
  3114. goto finish;
  3115. }
  3116. /* There is nothing for the blocked_rdev to block */
  3117. rdev_dec_pending(s.blocked_rdev, conf->mddev);
  3118. s.blocked_rdev = NULL;
  3119. }
  3120. if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
  3121. set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
  3122. set_bit(STRIPE_BIOFILL_RUN, &sh->state);
  3123. }
  3124. pr_debug("locked=%d uptodate=%d to_read=%d"
  3125. " to_write=%d failed=%d failed_num=%d,%d\n",
  3126. s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
  3127. s.failed_num[0], s.failed_num[1]);
  3128. /* check if the array has lost more than max_degraded devices and,
  3129. * if so, some requests might need to be failed.
  3130. */
  3131. if (s.failed > conf->max_degraded) {
  3132. sh->check_state = 0;
  3133. sh->reconstruct_state = 0;
  3134. if (s.to_read+s.to_write+s.written)
  3135. handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
  3136. if (s.syncing + s.replacing)
  3137. handle_failed_sync(conf, sh, &s);
  3138. }
  3139. /* Now we check to see if any write operations have recently
  3140. * completed
  3141. */
  3142. prexor = 0;
  3143. if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
  3144. prexor = 1;
  3145. if (sh->reconstruct_state == reconstruct_state_drain_result ||
  3146. sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
  3147. sh->reconstruct_state = reconstruct_state_idle;
  3148. /* All the 'written' buffers and the parity block are ready to
  3149. * be written back to disk
  3150. */
  3151. BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
  3152. !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags));
  3153. BUG_ON(sh->qd_idx >= 0 &&
  3154. !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) &&
  3155. !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags));
  3156. for (i = disks; i--; ) {
  3157. struct r5dev *dev = &sh->dev[i];
  3158. if (test_bit(R5_LOCKED, &dev->flags) &&
  3159. (i == sh->pd_idx || i == sh->qd_idx ||
  3160. dev->written)) {
  3161. pr_debug("Writing block %d\n", i);
  3162. set_bit(R5_Wantwrite, &dev->flags);
  3163. if (prexor)
  3164. continue;
  3165. if (!test_bit(R5_Insync, &dev->flags) ||
  3166. ((i == sh->pd_idx || i == sh->qd_idx) &&
  3167. s.failed == 0))
  3168. set_bit(STRIPE_INSYNC, &sh->state);
  3169. }
  3170. }
  3171. if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
  3172. s.dec_preread_active = 1;
  3173. }
  3174. /*
  3175. * might be able to return some write requests if the parity blocks
  3176. * are safe, or on a failed drive
  3177. */
  3178. pdev = &sh->dev[sh->pd_idx];
  3179. s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
  3180. || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
  3181. qdev = &sh->dev[sh->qd_idx];
  3182. s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
  3183. || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
  3184. || conf->level < 6;
  3185. if (s.written &&
  3186. (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
  3187. && !test_bit(R5_LOCKED, &pdev->flags)
  3188. && (test_bit(R5_UPTODATE, &pdev->flags) ||
  3189. test_bit(R5_Discard, &pdev->flags))))) &&
  3190. (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
  3191. && !test_bit(R5_LOCKED, &qdev->flags)
  3192. && (test_bit(R5_UPTODATE, &qdev->flags) ||
  3193. test_bit(R5_Discard, &qdev->flags))))))
  3194. handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
  3195. /* Now we might consider reading some blocks, either to check/generate
  3196. * parity, or to satisfy requests
  3197. * or to load a block that is being partially written.
  3198. */
  3199. if (s.to_read || s.non_overwrite
  3200. || (conf->level == 6 && s.to_write && s.failed)
  3201. || (s.syncing && (s.uptodate + s.compute < disks))
  3202. || s.replacing
  3203. || s.expanding)
  3204. handle_stripe_fill(sh, &s, disks);
  3205. /* Now to consider new write requests and what else, if anything
  3206. * should be read. We do not handle new writes when:
  3207. * 1/ A 'write' operation (copy+xor) is already in flight.
  3208. * 2/ A 'check' operation is in flight, as it may clobber the parity
  3209. * block.
  3210. */
  3211. if (s.to_write && !sh->reconstruct_state && !sh->check_state)
  3212. handle_stripe_dirtying(conf, sh, &s, disks);
  3213. /* maybe we need to check and possibly fix the parity for this stripe
  3214. * Any reads will already have been scheduled, so we just see if enough
  3215. * data is available. The parity check is held off while parity
  3216. * dependent operations are in flight.
  3217. */
  3218. if (sh->check_state ||
  3219. (s.syncing && s.locked == 0 &&
  3220. !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
  3221. !test_bit(STRIPE_INSYNC, &sh->state))) {
  3222. if (conf->level == 6)
  3223. handle_parity_checks6(conf, sh, &s, disks);
  3224. else
  3225. handle_parity_checks5(conf, sh, &s, disks);
  3226. }
  3227. if (s.replacing && s.locked == 0
  3228. && !test_bit(STRIPE_INSYNC, &sh->state)) {
  3229. /* Write out to replacement devices where possible */
  3230. for (i = 0; i < conf->raid_disks; i++)
  3231. if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
  3232. test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
  3233. set_bit(R5_WantReplace, &sh->dev[i].flags);
  3234. set_bit(R5_LOCKED, &sh->dev[i].flags);
  3235. s.locked++;
  3236. }
  3237. set_bit(STRIPE_INSYNC, &sh->state);
  3238. }
  3239. if ((s.syncing || s.replacing) && s.locked == 0 &&
  3240. test_bit(STRIPE_INSYNC, &sh->state)) {
  3241. md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
  3242. clear_bit(STRIPE_SYNCING, &sh->state);
  3243. }
  3244. /* If the failed drives are just a ReadError, then we might need
  3245. * to progress the repair/check process
  3246. */
  3247. if (s.failed <= conf->max_degraded && !conf->mddev->ro)
  3248. for (i = 0; i < s.failed; i++) {
  3249. struct r5dev *dev = &sh->dev[s.failed_num[i]];
  3250. if (test_bit(R5_ReadError, &dev->flags)
  3251. && !test_bit(R5_LOCKED, &dev->flags)
  3252. && test_bit(R5_UPTODATE, &dev->flags)
  3253. ) {
  3254. if (!test_bit(R5_ReWrite, &dev->flags)) {
  3255. set_bit(R5_Wantwrite, &dev->flags);
  3256. set_bit(R5_ReWrite, &dev->flags);
  3257. set_bit(R5_LOCKED, &dev->flags);
  3258. s.locked++;
  3259. } else {
  3260. /* let's read it back */
  3261. set_bit(R5_Wantread, &dev->flags);
  3262. set_bit(R5_LOCKED, &dev->flags);
  3263. s.locked++;
  3264. }
  3265. }
  3266. }
  3267. /* Finish reconstruct operations initiated by the expansion process */
  3268. if (sh->reconstruct_state == reconstruct_state_result) {
  3269. struct stripe_head *sh_src
  3270. = get_active_stripe(conf, sh->sector, 1, 1, 1);
  3271. if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
  3272. /* sh cannot be written until sh_src has been read.
  3273. * so arrange for sh to be delayed a little
  3274. */
  3275. set_bit(STRIPE_DELAYED, &sh->state);
  3276. set_bit(STRIPE_HANDLE, &sh->state);
  3277. if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
  3278. &sh_src->state))
  3279. atomic_inc(&conf->preread_active_stripes);
  3280. release_stripe(sh_src);
  3281. goto finish;
  3282. }
  3283. if (sh_src)
  3284. release_stripe(sh_src);
  3285. sh->reconstruct_state = reconstruct_state_idle;
  3286. clear_bit(STRIPE_EXPANDING, &sh->state);
  3287. for (i = conf->raid_disks; i--; ) {
  3288. set_bit(R5_Wantwrite, &sh->dev[i].flags);
  3289. set_bit(R5_LOCKED, &sh->dev[i].flags);
  3290. s.locked++;
  3291. }
  3292. }
  3293. if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
  3294. !sh->reconstruct_state) {
  3295. /* Need to write out all blocks after computing parity */
  3296. sh->disks = conf->raid_disks;
  3297. stripe_set_idx(sh->sector, conf, 0, sh);
  3298. schedule_reconstruction(sh, &s, 1, 1);
  3299. } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
  3300. clear_bit(STRIPE_EXPAND_READY, &sh->state);
  3301. atomic_dec(&conf->reshape_stripes);
  3302. wake_up(&conf->wait_for_overlap);
  3303. md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
  3304. }
  3305. if (s.expanding && s.locked == 0 &&
  3306. !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
  3307. handle_stripe_expansion(conf, sh);
  3308. finish:
  3309. /* wait for this device to become unblocked */
  3310. if (unlikely(s.blocked_rdev)) {
  3311. if (conf->mddev->external)
  3312. md_wait_for_blocked_rdev(s.blocked_rdev,
  3313. conf->mddev);
  3314. else
  3315. /* Internal metadata will immediately
  3316. * be written by raid5d, so we don't
  3317. * need to wait here.
  3318. */
  3319. rdev_dec_pending(s.blocked_rdev,
  3320. conf->mddev);
  3321. }
  3322. if (s.handle_bad_blocks)
  3323. for (i = disks; i--; ) {
  3324. struct md_rdev *rdev;
  3325. struct r5dev *dev = &sh->dev[i];
  3326. if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
  3327. /* We own a safe reference to the rdev */
  3328. rdev = conf->disks[i].rdev;
  3329. if (!rdev_set_badblocks(rdev, sh->sector,
  3330. STRIPE_SECTORS, 0))
  3331. md_error(conf->mddev, rdev);
  3332. rdev_dec_pending(rdev, conf->mddev);
  3333. }
  3334. if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
  3335. rdev = conf->disks[i].rdev;
  3336. rdev_clear_badblocks(rdev, sh->sector,
  3337. STRIPE_SECTORS, 0);
  3338. rdev_dec_pending(rdev, conf->mddev);
  3339. }
  3340. if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
  3341. rdev = conf->disks[i].replacement;
  3342. if (!rdev)
3343. /* rdev has been moved down */
  3344. rdev = conf->disks[i].rdev;
  3345. rdev_clear_badblocks(rdev, sh->sector,
  3346. STRIPE_SECTORS, 0);
  3347. rdev_dec_pending(rdev, conf->mddev);
  3348. }
  3349. }
  3350. if (s.ops_request)
  3351. raid_run_ops(sh, s.ops_request);
  3352. ops_run_io(sh, &s);
  3353. if (s.dec_preread_active) {
  3354. /* We delay this until after ops_run_io so that if make_request
  3355. * is waiting on a flush, it won't continue until the writes
  3356. * have actually been submitted.
  3357. */
  3358. atomic_dec(&conf->preread_active_stripes);
  3359. if (atomic_read(&conf->preread_active_stripes) <
  3360. IO_THRESHOLD)
  3361. md_wakeup_thread(conf->mddev->thread);
  3362. }
  3363. return_io(s.return_bi);
  3364. clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
  3365. }
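/* Move delayed stripes onto the hold_list once pre-read activity has
 * dropped below IO_THRESHOLD, marking each one STRIPE_PREREAD_ACTIVE
 * so handle_stripe_dirtying can issue the old-data reads it was
 * holding back.
 */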
  3366. static void raid5_activate_delayed(struct r5conf *conf)
  3367. {
  3368. if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
  3369. while (!list_empty(&conf->delayed_list)) {
  3370. struct list_head *l = conf->delayed_list.next;
  3371. struct stripe_head *sh;
  3372. sh = list_entry(l, struct stripe_head, lru);
  3373. list_del_init(l);
  3374. clear_bit(STRIPE_DELAYED, &sh->state);
  3375. if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
  3376. atomic_inc(&conf->preread_active_stripes);
  3377. list_add_tail(&sh->lru, &conf->hold_list);
  3378. }
  3379. }
  3380. }
  3381. static void activate_bit_delay(struct r5conf *conf)
  3382. {
  3383. /* device_lock is held */
  3384. struct list_head head;
  3385. list_add(&head, &conf->bitmap_list);
  3386. list_del_init(&conf->bitmap_list);
  3387. while (!list_empty(&head)) {
  3388. struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
  3389. list_del_init(&sh->lru);
  3390. atomic_inc(&sh->count);
  3391. __release_stripe(conf, sh);
  3392. }
  3393. }
  3394. int md_raid5_congested(struct mddev *mddev, int bits)
  3395. {
  3396. struct r5conf *conf = mddev->private;
  3397. /* No difference between reads and writes. Just check
  3398. * how busy the stripe_cache is
  3399. */
  3400. if (conf->inactive_blocked)
  3401. return 1;
  3402. if (conf->quiesce)
  3403. return 1;
  3404. if (list_empty_careful(&conf->inactive_list))
  3405. return 1;
  3406. return 0;
  3407. }
  3408. EXPORT_SYMBOL_GPL(md_raid5_congested);
  3409. static int raid5_congested(void *data, int bits)
  3410. {
  3411. struct mddev *mddev = data;
  3412. return mddev_congested(mddev, bits) ||
  3413. md_raid5_congested(mddev, bits);
  3414. }
  3415. /* We want read requests to align with chunks where possible,
  3416. * but write requests don't need to.
  3417. */
  3418. static int raid5_mergeable_bvec(struct request_queue *q,
  3419. struct bvec_merge_data *bvm,
  3420. struct bio_vec *biovec)
  3421. {
  3422. struct mddev *mddev = q->queuedata;
  3423. sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
  3424. int max;
  3425. unsigned int chunk_sectors = mddev->chunk_sectors;
  3426. unsigned int bio_sectors = bvm->bi_size >> 9;
  3427. if ((bvm->bi_rw & 1) == WRITE)
  3428. return biovec->bv_len; /* always allow writes to be mergeable */
  3429. if (mddev->new_chunk_sectors < mddev->chunk_sectors)
  3430. chunk_sectors = mddev->new_chunk_sectors;
  3431. max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
  3432. if (max < 0) max = 0;
  3433. if (max <= biovec->bv_len && bio_sectors == 0)
  3434. return biovec->bv_len;
  3435. else
  3436. return max;
  3437. }
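/* in_chunk_boundary() is true when the whole bio falls inside one
 * chunk on one device: e.g. with 128k chunks (256 sectors), an
 * 8-sector read starting 248 sectors into a chunk still fits
 * (248 + 8 <= 256), while one starting at 252 does not.
 */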
  3438. static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
  3439. {
  3440. sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
  3441. unsigned int chunk_sectors = mddev->chunk_sectors;
  3442. unsigned int bio_sectors = bio->bi_size >> 9;
  3443. if (mddev->new_chunk_sectors < mddev->chunk_sectors)
  3444. chunk_sectors = mddev->new_chunk_sectors;
  3445. return chunk_sectors >=
  3446. ((sector & (chunk_sectors - 1)) + bio_sectors);
  3447. }
  3448. /*
3449. * add bio to the retry LIFO in O(1) (we are in interrupt context),
3450. * to be sampled later by raid5d.
  3451. */
  3452. static void add_bio_to_retry(struct bio *bi,struct r5conf *conf)
  3453. {
  3454. unsigned long flags;
  3455. spin_lock_irqsave(&conf->device_lock, flags);
  3456. bi->bi_next = conf->retry_read_aligned_list;
  3457. conf->retry_read_aligned_list = bi;
  3458. spin_unlock_irqrestore(&conf->device_lock, flags);
  3459. md_wakeup_thread(conf->mddev->thread);
  3460. }
  3461. static struct bio *remove_bio_from_retry(struct r5conf *conf)
  3462. {
  3463. struct bio *bi;
  3464. bi = conf->retry_read_aligned;
  3465. if (bi) {
  3466. conf->retry_read_aligned = NULL;
  3467. return bi;
  3468. }
  3469. bi = conf->retry_read_aligned_list;
3470. if (bi) {
  3471. conf->retry_read_aligned_list = bi->bi_next;
  3472. bi->bi_next = NULL;
  3473. /*
3474. * this sets the active stripe count to 1 and the processed
3475. * stripe count to zero (upper 8 bits)
  3476. */
  3477. raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
  3478. }
  3479. return bi;
  3480. }
  3481. /*
  3482. * The "raid5_align_endio" should check if the read succeeded and if it
  3483. * did, call bio_endio on the original bio (having bio_put the new bio
  3484. * first).
3485. * If the read failed, queue the original bio for a retry through the stripe cache.
  3486. */
  3487. static void raid5_align_endio(struct bio *bi, int error)
  3488. {
  3489. struct bio* raid_bi = bi->bi_private;
  3490. struct mddev *mddev;
  3491. struct r5conf *conf;
  3492. int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
  3493. struct md_rdev *rdev;
  3494. bio_put(bi);
  3495. rdev = (void*)raid_bi->bi_next;
  3496. raid_bi->bi_next = NULL;
  3497. mddev = rdev->mddev;
  3498. conf = mddev->private;
  3499. rdev_dec_pending(rdev, conf->mddev);
  3500. if (!error && uptodate) {
  3501. bio_endio(raid_bi, 0);
  3502. if (atomic_dec_and_test(&conf->active_aligned_reads))
  3503. wake_up(&conf->wait_for_stripe);
  3504. return;
  3505. }
  3506. pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
  3507. add_bio_to_retry(raid_bi, conf);
  3508. }
  3509. static int bio_fits_rdev(struct bio *bi)
  3510. {
  3511. struct request_queue *q = bdev_get_queue(bi->bi_bdev);
  3512. if ((bi->bi_size>>9) > queue_max_sectors(q))
  3513. return 0;
  3514. blk_recount_segments(q, bi);
  3515. if (bi->bi_phys_segments > queue_max_segments(q))
  3516. return 0;
  3517. if (q->merge_bvec_fn)
  3518. /* it's too hard to apply the merge_bvec_fn at this stage,
3519. * so just give up
  3520. */
  3521. return 0;
  3522. return 1;
  3523. }
  3524. static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
  3525. {
  3526. struct r5conf *conf = mddev->private;
  3527. int dd_idx;
  3528. struct bio* align_bi;
  3529. struct md_rdev *rdev;
  3530. sector_t end_sector;
  3531. if (!in_chunk_boundary(mddev, raid_bio)) {
  3532. pr_debug("chunk_aligned_read : non aligned\n");
  3533. return 0;
  3534. }
  3535. /*
  3536. * use bio_clone_mddev to make a copy of the bio
  3537. */
  3538. align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
  3539. if (!align_bi)
  3540. return 0;
  3541. /*
  3542. * set bi_end_io to a new function, and set bi_private to the
  3543. * original bio.
  3544. */
  3545. align_bi->bi_end_io = raid5_align_endio;
  3546. align_bi->bi_private = raid_bio;
  3547. /*
  3548. * compute position
  3549. */
  3550. align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
  3551. 0,
  3552. &dd_idx, NULL);
  3553. end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
  3554. rcu_read_lock();
  3555. rdev = rcu_dereference(conf->disks[dd_idx].replacement);
  3556. if (!rdev || test_bit(Faulty, &rdev->flags) ||
  3557. rdev->recovery_offset < end_sector) {
  3558. rdev = rcu_dereference(conf->disks[dd_idx].rdev);
  3559. if (rdev &&
  3560. (test_bit(Faulty, &rdev->flags) ||
  3561. !(test_bit(In_sync, &rdev->flags) ||
  3562. rdev->recovery_offset >= end_sector)))
  3563. rdev = NULL;
  3564. }
  3565. if (rdev) {
  3566. sector_t first_bad;
  3567. int bad_sectors;
  3568. atomic_inc(&rdev->nr_pending);
  3569. rcu_read_unlock();
  3570. raid_bio->bi_next = (void*)rdev;
  3571. align_bi->bi_bdev = rdev->bdev;
  3572. align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
  3573. if (!bio_fits_rdev(align_bi) ||
  3574. is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
  3575. &first_bad, &bad_sectors)) {
  3576. /* too big in some way, or has a known bad block */
  3577. bio_put(align_bi);
  3578. rdev_dec_pending(rdev, mddev);
  3579. return 0;
  3580. }
  3581. /* No reshape active, so we can trust rdev->data_offset */
  3582. align_bi->bi_sector += rdev->data_offset;
  3583. spin_lock_irq(&conf->device_lock);
  3584. wait_event_lock_irq(conf->wait_for_stripe,
  3585. conf->quiesce == 0,
  3586. conf->device_lock);
  3587. atomic_inc(&conf->active_aligned_reads);
  3588. spin_unlock_irq(&conf->device_lock);
  3589. generic_make_request(align_bi);
  3590. return 1;
  3591. } else {
  3592. rcu_read_unlock();
  3593. bio_put(align_bi);
  3594. return 0;
  3595. }
  3596. }
  3597. /* __get_priority_stripe - get the next stripe to process
  3598. *
  3599. * Full stripe writes are allowed to pass preread active stripes up until
  3600. * the bypass_threshold is exceeded. In general the bypass_count
  3601. * increments when the handle_list is handled before the hold_list; however, it
3602. will not be incremented when STRIPE_IO_STARTED is sampled as set, signifying a
3603. stripe with in-flight i/o. The bypass_count will be reset when the
  3604. * head of the hold_list has changed, i.e. the head was promoted to the
  3605. * handle_list.
  3606. */
  3607. static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
  3608. {
  3609. struct stripe_head *sh;
  3610. pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
  3611. __func__,
  3612. list_empty(&conf->handle_list) ? "empty" : "busy",
  3613. list_empty(&conf->hold_list) ? "empty" : "busy",
  3614. atomic_read(&conf->pending_full_writes), conf->bypass_count);
  3615. if (!list_empty(&conf->handle_list)) {
  3616. sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
  3617. if (list_empty(&conf->hold_list))
  3618. conf->bypass_count = 0;
  3619. else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
  3620. if (conf->hold_list.next == conf->last_hold)
  3621. conf->bypass_count++;
  3622. else {
  3623. conf->last_hold = conf->hold_list.next;
  3624. conf->bypass_count -= conf->bypass_threshold;
  3625. if (conf->bypass_count < 0)
  3626. conf->bypass_count = 0;
  3627. }
  3628. }
  3629. } else if (!list_empty(&conf->hold_list) &&
  3630. ((conf->bypass_threshold &&
  3631. conf->bypass_count > conf->bypass_threshold) ||
  3632. atomic_read(&conf->pending_full_writes) == 0)) {
  3633. sh = list_entry(conf->hold_list.next,
  3634. typeof(*sh), lru);
  3635. conf->bypass_count -= conf->bypass_threshold;
  3636. if (conf->bypass_count < 0)
  3637. conf->bypass_count = 0;
  3638. } else
  3639. return NULL;
  3640. list_del_init(&sh->lru);
  3641. atomic_inc(&sh->count);
  3642. BUG_ON(atomic_read(&sh->count) != 1);
  3643. return sh;
  3644. }
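/* Per-plug batching: release_stripe_plug() parks stripes on a list
 * hanging off the caller's blk_plug, and raid5_unplug() releases them
 * all in one pass under device_lock when the plug is flushed.
 */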
  3645. struct raid5_plug_cb {
  3646. struct blk_plug_cb cb;
  3647. struct list_head list;
  3648. };
  3649. static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
  3650. {
  3651. struct raid5_plug_cb *cb = container_of(
  3652. blk_cb, struct raid5_plug_cb, cb);
  3653. struct stripe_head *sh;
  3654. struct mddev *mddev = cb->cb.data;
  3655. struct r5conf *conf = mddev->private;
  3656. if (cb->list.next && !list_empty(&cb->list)) {
  3657. spin_lock_irq(&conf->device_lock);
  3658. while (!list_empty(&cb->list)) {
  3659. sh = list_first_entry(&cb->list, struct stripe_head, lru);
  3660. list_del_init(&sh->lru);
  3661. /*
3662. * avoid a race where release_stripe_plug() sees
3663. * STRIPE_ON_UNPLUG_LIST clear while the stripe
3664. * is still on our list
  3665. */
  3666. smp_mb__before_clear_bit();
  3667. clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
  3668. __release_stripe(conf, sh);
  3669. }
  3670. spin_unlock_irq(&conf->device_lock);
  3671. }
  3672. kfree(cb);
  3673. }
  3674. static void release_stripe_plug(struct mddev *mddev,
  3675. struct stripe_head *sh)
  3676. {
  3677. struct blk_plug_cb *blk_cb = blk_check_plugged(
  3678. raid5_unplug, mddev,
  3679. sizeof(struct raid5_plug_cb));
  3680. struct raid5_plug_cb *cb;
  3681. if (!blk_cb) {
  3682. release_stripe(sh);
  3683. return;
  3684. }
  3685. cb = container_of(blk_cb, struct raid5_plug_cb, cb);
  3686. if (cb->list.next == NULL)
  3687. INIT_LIST_HEAD(&cb->list);
  3688. if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
  3689. list_add_tail(&sh->lru, &cb->list);
  3690. else
  3691. release_stripe(sh);
  3692. }
  3693. static void make_discard_request(struct mddev *mddev, struct bio *bi)
  3694. {
  3695. struct r5conf *conf = mddev->private;
  3696. sector_t logical_sector, last_sector;
  3697. struct stripe_head *sh;
  3698. int remaining;
  3699. int stripe_sectors;
  3700. if (mddev->reshape_position != MaxSector)
  3701. /* Skip discard while reshape is happening */
  3702. return;
  3703. logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
  3704. last_sector = bi->bi_sector + (bi->bi_size>>9);
  3705. bi->bi_next = NULL;
  3706. bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
  3707. stripe_sectors = conf->chunk_sectors *
  3708. (conf->raid_disks - conf->max_degraded);
  3709. logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector,
  3710. stripe_sectors);
  3711. sector_div(last_sector, stripe_sectors);
  3712. logical_sector *= conf->chunk_sectors;
  3713. last_sector *= conf->chunk_sectors;
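/* At this point the discard range has been shrunk to whole stripes:
 * the start was rounded up and the end rounded down to a multiple of
 * stripe_sectors (the data capacity of one full stripe), then both
 * were converted to per-device stripe addresses by scaling with
 * chunk_sectors.
 */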
  3714. for (; logical_sector < last_sector;
  3715. logical_sector += STRIPE_SECTORS) {
  3716. DEFINE_WAIT(w);
  3717. int d;
  3718. again:
  3719. sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
  3720. prepare_to_wait(&conf->wait_for_overlap, &w,
  3721. TASK_UNINTERRUPTIBLE);
  3722. spin_lock_irq(&sh->stripe_lock);
  3723. for (d = 0; d < conf->raid_disks; d++) {
  3724. if (d == sh->pd_idx || d == sh->qd_idx)
  3725. continue;
  3726. if (sh->dev[d].towrite || sh->dev[d].toread) {
  3727. set_bit(R5_Overlap, &sh->dev[d].flags);
  3728. spin_unlock_irq(&sh->stripe_lock);
  3729. release_stripe(sh);
  3730. schedule();
  3731. goto again;
  3732. }
  3733. }
  3734. finish_wait(&conf->wait_for_overlap, &w);
  3735. for (d = 0; d < conf->raid_disks; d++) {
  3736. if (d == sh->pd_idx || d == sh->qd_idx)
  3737. continue;
  3738. sh->dev[d].towrite = bi;
  3739. set_bit(R5_OVERWRITE, &sh->dev[d].flags);
  3740. raid5_inc_bi_active_stripes(bi);
  3741. }
  3742. spin_unlock_irq(&sh->stripe_lock);
  3743. if (conf->mddev->bitmap) {
  3744. for (d = 0;
  3745. d < conf->raid_disks - conf->max_degraded;
  3746. d++)
  3747. bitmap_startwrite(mddev->bitmap,
  3748. sh->sector,
  3749. STRIPE_SECTORS,
  3750. 0);
  3751. sh->bm_seq = conf->seq_flush + 1;
  3752. set_bit(STRIPE_BIT_DELAY, &sh->state);
  3753. }
  3754. set_bit(STRIPE_HANDLE, &sh->state);
  3755. clear_bit(STRIPE_DELAYED, &sh->state);
  3756. if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
  3757. atomic_inc(&conf->preread_active_stripes);
  3758. release_stripe_plug(mddev, sh);
  3759. }
  3760. remaining = raid5_dec_bi_active_stripes(bi);
  3761. if (remaining == 0) {
  3762. md_write_end(mddev);
  3763. bio_endio(bi, 0);
  3764. }
  3765. }
  3766. static void make_request(struct mddev *mddev, struct bio * bi)
  3767. {
  3768. struct r5conf *conf = mddev->private;
  3769. int dd_idx;
  3770. sector_t new_sector;
  3771. sector_t logical_sector, last_sector;
  3772. struct stripe_head *sh;
  3773. const int rw = bio_data_dir(bi);
  3774. int remaining;
  3775. if (unlikely(bi->bi_rw & REQ_FLUSH)) {
  3776. md_flush_request(mddev, bi);
  3777. return;
  3778. }
  3779. md_write_start(mddev, bi);
  3780. if (rw == READ &&
  3781. mddev->reshape_position == MaxSector &&
  3782. chunk_aligned_read(mddev,bi))
  3783. return;
  3784. if (unlikely(bi->bi_rw & REQ_DISCARD)) {
  3785. make_discard_request(mddev, bi);
  3786. return;
  3787. }
  3788. logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
  3789. last_sector = bi->bi_sector + (bi->bi_size>>9);
  3790. bi->bi_next = NULL;
  3791. bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
  3792. for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
  3793. DEFINE_WAIT(w);
  3794. int previous;
  3795. retry:
  3796. previous = 0;
  3797. prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
  3798. if (unlikely(conf->reshape_progress != MaxSector)) {
  3799. /* spinlock is needed as reshape_progress may be
  3800. * 64bit on a 32bit platform, and so it might be
3801. * possible to see a half-updated value.
  3802. * Of course reshape_progress could change after
  3803. * the lock is dropped, so once we get a reference
  3804. * to the stripe that we think it is, we will have
  3805. * to check again.
  3806. */
  3807. spin_lock_irq(&conf->device_lock);
  3808. if (mddev->reshape_backwards
  3809. ? logical_sector < conf->reshape_progress
  3810. : logical_sector >= conf->reshape_progress) {
  3811. previous = 1;
  3812. } else {
  3813. if (mddev->reshape_backwards
  3814. ? logical_sector < conf->reshape_safe
  3815. : logical_sector >= conf->reshape_safe) {
  3816. spin_unlock_irq(&conf->device_lock);
  3817. schedule();
  3818. goto retry;
  3819. }
  3820. }
  3821. spin_unlock_irq(&conf->device_lock);
  3822. }
  3823. new_sector = raid5_compute_sector(conf, logical_sector,
  3824. previous,
  3825. &dd_idx, NULL);
  3826. pr_debug("raid456: make_request, sector %llu logical %llu\n",
  3827. (unsigned long long)new_sector,
  3828. (unsigned long long)logical_sector);
  3829. sh = get_active_stripe(conf, new_sector, previous,
  3830. (bi->bi_rw&RWA_MASK), 0);
  3831. if (sh) {
  3832. if (unlikely(previous)) {
  3833. /* expansion might have moved on while waiting for a
  3834. * stripe, so we must do the range check again.
  3835. * Expansion could still move past after this
  3836. * test, but as we are holding a reference to
  3837. * 'sh', we know that if that happens,
  3838. * STRIPE_EXPANDING will get set and the expansion
  3839. * won't proceed until we finish with the stripe.
  3840. */
  3841. int must_retry = 0;
  3842. spin_lock_irq(&conf->device_lock);
  3843. if (mddev->reshape_backwards
  3844. ? logical_sector >= conf->reshape_progress
  3845. : logical_sector < conf->reshape_progress)
  3846. /* mismatch, need to try again */
  3847. must_retry = 1;
  3848. spin_unlock_irq(&conf->device_lock);
  3849. if (must_retry) {
  3850. release_stripe(sh);
  3851. schedule();
  3852. goto retry;
  3853. }
  3854. }
  3855. if (rw == WRITE &&
  3856. logical_sector >= mddev->suspend_lo &&
  3857. logical_sector < mddev->suspend_hi) {
  3858. release_stripe(sh);
  3859. /* As the suspend_* range is controlled by
  3860. * userspace, we want an interruptible
  3861. * wait.
  3862. */
  3863. flush_signals(current);
  3864. prepare_to_wait(&conf->wait_for_overlap,
  3865. &w, TASK_INTERRUPTIBLE);
  3866. if (logical_sector >= mddev->suspend_lo &&
  3867. logical_sector < mddev->suspend_hi)
  3868. schedule();
  3869. goto retry;
  3870. }
  3871. if (test_bit(STRIPE_EXPANDING, &sh->state) ||
  3872. !add_stripe_bio(sh, bi, dd_idx, rw)) {
  3873. /* Stripe is busy expanding or
  3874. * add failed due to overlap. Flush everything
  3875. * and wait a while
  3876. */
  3877. md_wakeup_thread(mddev->thread);
  3878. release_stripe(sh);
  3879. schedule();
  3880. goto retry;
  3881. }
  3882. finish_wait(&conf->wait_for_overlap, &w);
  3883. set_bit(STRIPE_HANDLE, &sh->state);
  3884. clear_bit(STRIPE_DELAYED, &sh->state);
  3885. if ((bi->bi_rw & REQ_SYNC) &&
  3886. !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
  3887. atomic_inc(&conf->preread_active_stripes);
  3888. release_stripe_plug(mddev, sh);
  3889. } else {
3890. /* cannot get stripe for read-ahead, just give up */
  3891. clear_bit(BIO_UPTODATE, &bi->bi_flags);
  3892. finish_wait(&conf->wait_for_overlap, &w);
  3893. break;
  3894. }
  3895. }
  3896. remaining = raid5_dec_bi_active_stripes(bi);
  3897. if (remaining == 0) {
  3898. if ( rw == WRITE )
  3899. md_write_end(mddev);
  3900. bio_endio(bi, 0);
  3901. }
  3902. }
  3903. static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
  3904. static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
  3905. {
3906. /* reshaping is quite different from recovery/resync, so it is
3907. * handled separately ... here.
  3908. *
  3909. * On each call to sync_request, we gather one chunk worth of
  3910. * destination stripes and flag them as expanding.
  3911. * Then we find all the source stripes and request reads.
  3912. * As the reads complete, handle_stripe will copy the data
  3913. * into the destination stripe and release that stripe.
  3914. */
  3915. struct r5conf *conf = mddev->private;
  3916. struct stripe_head *sh;
  3917. sector_t first_sector, last_sector;
  3918. int raid_disks = conf->previous_raid_disks;
  3919. int data_disks = raid_disks - conf->max_degraded;
  3920. int new_data_disks = conf->raid_disks - conf->max_degraded;
  3921. int i;
  3922. int dd_idx;
  3923. sector_t writepos, readpos, safepos;
  3924. sector_t stripe_addr;
  3925. int reshape_sectors;
  3926. struct list_head stripes;
  3927. if (sector_nr == 0) {
  3928. /* If restarting in the middle, skip the initial sectors */
  3929. if (mddev->reshape_backwards &&
  3930. conf->reshape_progress < raid5_size(mddev, 0, 0)) {
  3931. sector_nr = raid5_size(mddev, 0, 0)
  3932. - conf->reshape_progress;
  3933. } else if (!mddev->reshape_backwards &&
  3934. conf->reshape_progress > 0)
  3935. sector_nr = conf->reshape_progress;
  3936. sector_div(sector_nr, new_data_disks);
  3937. if (sector_nr) {
  3938. mddev->curr_resync_completed = sector_nr;
  3939. sysfs_notify(&mddev->kobj, NULL, "sync_completed");
  3940. *skipped = 1;
  3941. return sector_nr;
  3942. }
  3943. }
  3944. /* We need to process a full chunk at a time.
  3945. * If old and new chunk sizes differ, we need to process the
  3946. * largest of these
  3947. */
  3948. if (mddev->new_chunk_sectors > mddev->chunk_sectors)
  3949. reshape_sectors = mddev->new_chunk_sectors;
  3950. else
  3951. reshape_sectors = mddev->chunk_sectors;
/* We update the metadata at least every 10 seconds, or when
 * the data about to be copied would over-write the source of
 * the data at the front of the range - i.e. when the point one
 * new stripe beyond reshape_progress, mapped through the new
 * layout, lands beyond where reshape_safe maps in the old layout.
 */
  3958. writepos = conf->reshape_progress;
  3959. sector_div(writepos, new_data_disks);
  3960. readpos = conf->reshape_progress;
  3961. sector_div(readpos, data_disks);
  3962. safepos = conf->reshape_safe;
  3963. sector_div(safepos, data_disks);
  3964. if (mddev->reshape_backwards) {
  3965. writepos -= min_t(sector_t, reshape_sectors, writepos);
  3966. readpos += reshape_sectors;
  3967. safepos += reshape_sectors;
  3968. } else {
  3969. writepos += reshape_sectors;
  3970. readpos -= min_t(sector_t, reshape_sectors, readpos);
  3971. safepos -= min_t(sector_t, reshape_sectors, safepos);
  3972. }
  3973. /* Having calculated the 'writepos' possibly use it
  3974. * to set 'stripe_addr' which is where we will write to.
  3975. */
  3976. if (mddev->reshape_backwards) {
  3977. BUG_ON(conf->reshape_progress == 0);
  3978. stripe_addr = writepos;
  3979. BUG_ON((mddev->dev_sectors &
  3980. ~((sector_t)reshape_sectors - 1))
  3981. - reshape_sectors - stripe_addr
  3982. != sector_nr);
  3983. } else {
  3984. BUG_ON(writepos != sector_nr + reshape_sectors);
  3985. stripe_addr = sector_nr;
  3986. }
  3987. /* 'writepos' is the most advanced device address we might write.
  3988. * 'readpos' is the least advanced device address we might read.
  3989. * 'safepos' is the least address recorded in the metadata as having
  3990. * been reshaped.
  3991. * If there is a min_offset_diff, these are adjusted either by
  3992. * increasing the safepos/readpos if diff is negative, or
  3993. * increasing writepos if diff is positive.
  3994. * If 'readpos' is then behind 'writepos', there is no way that we can
  3995. * ensure safety in the face of a crash - that must be done by userspace
  3996. * making a backup of the data. So in that case there is no particular
  3997. * rush to update metadata.
  3998. * Otherwise if 'safepos' is behind 'writepos', then we really need to
  3999. * update the metadata to advance 'safepos' to match 'readpos' so that
  4000. * we can be safe in the event of a crash.
  4001. * So we insist on updating metadata if safepos is behind writepos and
  4002. * readpos is beyond writepos.
  4003. * In any case, update the metadata every 10 seconds.
  4004. * Maybe that number should be configurable, but I'm not sure it is
  4005. * worth it.... maybe it could be a multiple of safemode_delay???
  4006. */
  4007. if (conf->min_offset_diff < 0) {
  4008. safepos += -conf->min_offset_diff;
  4009. readpos += -conf->min_offset_diff;
  4010. } else
  4011. writepos += conf->min_offset_diff;
  4012. if ((mddev->reshape_backwards
  4013. ? (safepos > writepos && readpos < writepos)
  4014. : (safepos < writepos && readpos > writepos)) ||
  4015. time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
  4016. /* Cannot proceed until we've updated the superblock... */
  4017. wait_event(conf->wait_for_overlap,
  4018. atomic_read(&conf->reshape_stripes)==0);
  4019. mddev->reshape_position = conf->reshape_progress;
  4020. mddev->curr_resync_completed = sector_nr;
  4021. conf->reshape_checkpoint = jiffies;
  4022. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  4023. md_wakeup_thread(mddev->thread);
  4024. wait_event(mddev->sb_wait, mddev->flags == 0 ||
  4025. kthread_should_stop());
  4026. spin_lock_irq(&conf->device_lock);
  4027. conf->reshape_safe = mddev->reshape_position;
  4028. spin_unlock_irq(&conf->device_lock);
  4029. wake_up(&conf->wait_for_overlap);
  4030. sysfs_notify(&mddev->kobj, NULL, "sync_completed");
  4031. }
  4032. INIT_LIST_HEAD(&stripes);
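/* Gather one chunk's worth of destination stripes, flag them as
 * expanding, and keep them on a private list; they are only released
 * further down, once the matching source stripes have been marked
 * for reading.
 */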
  4033. for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
  4034. int j;
  4035. int skipped_disk = 0;
  4036. sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
  4037. set_bit(STRIPE_EXPANDING, &sh->state);
  4038. atomic_inc(&conf->reshape_stripes);
  4039. /* If any of this stripe is beyond the end of the old
  4040. * array, then we need to zero those blocks
  4041. */
  4042. for (j=sh->disks; j--;) {
  4043. sector_t s;
  4044. if (j == sh->pd_idx)
  4045. continue;
  4046. if (conf->level == 6 &&
  4047. j == sh->qd_idx)
  4048. continue;
  4049. s = compute_blocknr(sh, j, 0);
  4050. if (s < raid5_size(mddev, 0, 0)) {
  4051. skipped_disk = 1;
  4052. continue;
  4053. }
  4054. memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
  4055. set_bit(R5_Expanded, &sh->dev[j].flags);
  4056. set_bit(R5_UPTODATE, &sh->dev[j].flags);
  4057. }
  4058. if (!skipped_disk) {
  4059. set_bit(STRIPE_EXPAND_READY, &sh->state);
  4060. set_bit(STRIPE_HANDLE, &sh->state);
  4061. }
  4062. list_add(&sh->lru, &stripes);
  4063. }
  4064. spin_lock_irq(&conf->device_lock);
  4065. if (mddev->reshape_backwards)
  4066. conf->reshape_progress -= reshape_sectors * new_data_disks;
  4067. else
  4068. conf->reshape_progress += reshape_sectors * new_data_disks;
  4069. spin_unlock_irq(&conf->device_lock);
/* Ok, those stripes are ready. We can start scheduling
  4071. * reads on the source stripes.
  4072. * The source stripes are determined by mapping the first and last
  4073. * block on the destination stripes.
  4074. */
  4075. first_sector =
  4076. raid5_compute_sector(conf, stripe_addr*(new_data_disks),
  4077. 1, &dd_idx, NULL);
  4078. last_sector =
  4079. raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
  4080. * new_data_disks - 1),
  4081. 1, &dd_idx, NULL);
  4082. if (last_sector >= mddev->dev_sectors)
  4083. last_sector = mddev->dev_sectors - 1;
  4084. while (first_sector <= last_sector) {
  4085. sh = get_active_stripe(conf, first_sector, 1, 0, 1);
  4086. set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
  4087. set_bit(STRIPE_HANDLE, &sh->state);
  4088. release_stripe(sh);
  4089. first_sector += STRIPE_SECTORS;
  4090. }
  4091. /* Now that the sources are clearly marked, we can release
  4092. * the destination stripes
  4093. */
  4094. while (!list_empty(&stripes)) {
  4095. sh = list_entry(stripes.next, struct stripe_head, lru);
  4096. list_del_init(&sh->lru);
  4097. release_stripe(sh);
  4098. }
  4099. /* If this takes us to the resync_max point where we have to pause,
  4100. * then we need to write out the superblock.
  4101. */
  4102. sector_nr += reshape_sectors;
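/* Checkpoint once we are at least half way from the last recorded
 * position to resync_max (the '* 2' below), so the superblock gets
 * written before md_do_sync pauses at resync_max.
 */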
  4103. if ((sector_nr - mddev->curr_resync_completed) * 2
  4104. >= mddev->resync_max - mddev->curr_resync_completed) {
  4105. /* Cannot proceed until we've updated the superblock... */
  4106. wait_event(conf->wait_for_overlap,
  4107. atomic_read(&conf->reshape_stripes) == 0);
  4108. mddev->reshape_position = conf->reshape_progress;
  4109. mddev->curr_resync_completed = sector_nr;
  4110. conf->reshape_checkpoint = jiffies;
  4111. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  4112. md_wakeup_thread(mddev->thread);
  4113. wait_event(mddev->sb_wait,
  4114. !test_bit(MD_CHANGE_DEVS, &mddev->flags)
  4115. || kthread_should_stop());
  4116. spin_lock_irq(&conf->device_lock);
  4117. conf->reshape_safe = mddev->reshape_position;
  4118. spin_unlock_irq(&conf->device_lock);
  4119. wake_up(&conf->wait_for_overlap);
  4120. sysfs_notify(&mddev->kobj, NULL, "sync_completed");
  4121. }
  4122. return reshape_sectors;
  4123. }
  4124. /* FIXME go_faster isn't used */
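/* Perform one unit of resync/recovery work starting at sector_nr (a
 * per-device address).  Reshape is handed off to reshape_request();
 * otherwise a single STRIPE_SECTORS-sized stripe is synced and the
 * number of sectors handled (or skipped) is returned.
 */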
  4125. static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
  4126. {
  4127. struct r5conf *conf = mddev->private;
  4128. struct stripe_head *sh;
  4129. sector_t max_sector = mddev->dev_sectors;
  4130. sector_t sync_blocks;
  4131. int still_degraded = 0;
  4132. int i;
  4133. if (sector_nr >= max_sector) {
  4134. /* just being told to finish up .. nothing much to do */
  4135. if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
  4136. end_reshape(conf);
  4137. return 0;
  4138. }
  4139. if (mddev->curr_resync < max_sector) /* aborted */
  4140. bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
  4141. &sync_blocks, 1);
  4142. else /* completed sync */
  4143. conf->fullsync = 0;
  4144. bitmap_close_sync(mddev->bitmap);
  4145. return 0;
  4146. }
  4147. /* Allow raid5_quiesce to complete */
  4148. wait_event(conf->wait_for_overlap, conf->quiesce != 2);
  4149. if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
  4150. return reshape_request(mddev, sector_nr, skipped);
  4151. /* No need to check resync_max as we never do more than one
  4152. * stripe, and as resync_max will always be on a chunk boundary,
  4153. * if the check in md_do_sync didn't fire, there is no chance
  4154. * of overstepping resync_max here
  4155. */
/* if there are too many failed drives and we are trying
  4157. * to resync, then assert that we are finished, because there is
  4158. * nothing we can do.
  4159. */
  4160. if (mddev->degraded >= conf->max_degraded &&
  4161. test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
  4162. sector_t rv = mddev->dev_sectors - sector_nr;
  4163. *skipped = 1;
  4164. return rv;
  4165. }
  4166. if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
  4167. !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
  4168. !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
  4169. /* we can skip this block, and probably more */
  4170. sync_blocks /= STRIPE_SECTORS;
  4171. *skipped = 1;
  4172. return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
  4173. }
  4174. bitmap_cond_end_sync(mddev->bitmap, sector_nr);
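/* First try to get a stripe without blocking; if the cache is
 * exhausted fall back to a blocking request and then yield briefly so
 * that a competing user of the stripe cache can make progress.
 */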
  4175. sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
  4176. if (sh == NULL) {
  4177. sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
  4178. /* make sure we don't swamp the stripe cache if someone else
  4179. * is trying to get access
  4180. */
  4181. schedule_timeout_uninterruptible(1);
  4182. }
  4183. /* Need to check if array will still be degraded after recovery/resync
  4184. * We don't need to check the 'failed' flag as when that gets set,
  4185. * recovery aborts.
  4186. */
  4187. for (i = 0; i < conf->raid_disks; i++)
  4188. if (conf->disks[i].rdev == NULL)
  4189. still_degraded = 1;
  4190. bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
  4191. set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
  4192. handle_stripe(sh);
  4193. release_stripe(sh);
  4194. return STRIPE_SECTORS;
  4195. }
  4196. static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
  4197. {
  4198. /* We may not be able to submit a whole bio at once as there
  4199. * may not be enough stripe_heads available.
  4200. * We cannot pre-allocate enough stripe_heads as we may need
 * more than exist in the cache (if we allow ever larger chunks).
 * So we do one stripe head at a time and record via
 * raid5_set_bi_processed_stripes() how many have been done.
 *
 * We *know* that this entire raid_bio is in one chunk, so
 * there is only one 'dd_idx' and we only need one call to
 * raid5_compute_sector.
  4207. */
  4208. struct stripe_head *sh;
  4209. int dd_idx;
  4210. sector_t sector, logical_sector, last_sector;
  4211. int scnt = 0;
  4212. int remaining;
  4213. int handled = 0;
  4214. logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
  4215. sector = raid5_compute_sector(conf, logical_sector,
  4216. 0, &dd_idx, NULL);
  4217. last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
  4218. for (; logical_sector < last_sector;
  4219. logical_sector += STRIPE_SECTORS,
  4220. sector += STRIPE_SECTORS,
  4221. scnt++) {
  4222. if (scnt < raid5_bi_processed_stripes(raid_bio))
  4223. /* already done this stripe */
  4224. continue;
  4225. sh = get_active_stripe(conf, sector, 0, 1, 0);
  4226. if (!sh) {
  4227. /* failed to get a stripe - must wait */
  4228. raid5_set_bi_processed_stripes(raid_bio, scnt);
  4229. conf->retry_read_aligned = raid_bio;
  4230. return handled;
  4231. }
  4232. if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
  4233. release_stripe(sh);
  4234. raid5_set_bi_processed_stripes(raid_bio, scnt);
  4235. conf->retry_read_aligned = raid_bio;
  4236. return handled;
  4237. }
  4238. set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
  4239. handle_stripe(sh);
  4240. release_stripe(sh);
  4241. handled++;
  4242. }
  4243. remaining = raid5_dec_bi_active_stripes(raid_bio);
  4244. if (remaining == 0)
  4245. bio_endio(raid_bio, 0);
  4246. if (atomic_dec_and_test(&conf->active_aligned_reads))
  4247. wake_up(&conf->wait_for_stripe);
  4248. return handled;
  4249. }
  4250. #define MAX_STRIPE_BATCH 8
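/* Called with device_lock held: pull up to MAX_STRIPE_BATCH stripes
 * off the priority lists, drop the lock while handling them, then
 * retake it to release them, so stripe handling does not serialise
 * every other user of the lock.
 */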
  4251. static int handle_active_stripes(struct r5conf *conf)
  4252. {
  4253. struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
  4254. int i, batch_size = 0;
  4255. while (batch_size < MAX_STRIPE_BATCH &&
  4256. (sh = __get_priority_stripe(conf)) != NULL)
  4257. batch[batch_size++] = sh;
  4258. if (batch_size == 0)
  4259. return batch_size;
  4260. spin_unlock_irq(&conf->device_lock);
  4261. for (i = 0; i < batch_size; i++)
  4262. handle_stripe(batch[i]);
  4263. cond_resched();
  4264. spin_lock_irq(&conf->device_lock);
  4265. for (i = 0; i < batch_size; i++)
  4266. __release_stripe(conf, batch[i]);
  4267. return batch_size;
  4268. }
  4269. /*
  4270. * This is our raid5 kernel thread.
  4271. *
  4272. * We scan the hash table for stripes which can be handled now.
  4273. * During the scan, completed stripes are saved for us by the interrupt
  4274. * handler, so that they will not have to wait for our next wakeup.
  4275. */
  4276. static void raid5d(struct md_thread *thread)
  4277. {
  4278. struct mddev *mddev = thread->mddev;
  4279. struct r5conf *conf = mddev->private;
  4280. int handled;
  4281. struct blk_plug plug;
  4282. pr_debug("+++ raid5d active\n");
  4283. md_check_recovery(mddev);
  4284. blk_start_plug(&plug);
  4285. handled = 0;
  4286. spin_lock_irq(&conf->device_lock);
  4287. while (1) {
  4288. struct bio *bio;
  4289. int batch_size;
if (!list_empty(&conf->bitmap_list)) {
  4292. /* Now is a good time to flush some bitmap updates */
  4293. conf->seq_flush++;
  4294. spin_unlock_irq(&conf->device_lock);
  4295. bitmap_unplug(mddev->bitmap);
  4296. spin_lock_irq(&conf->device_lock);
  4297. conf->seq_write = conf->seq_flush;
  4298. activate_bit_delay(conf);
  4299. }
  4300. raid5_activate_delayed(conf);
  4301. while ((bio = remove_bio_from_retry(conf))) {
  4302. int ok;
  4303. spin_unlock_irq(&conf->device_lock);
  4304. ok = retry_aligned_read(conf, bio);
  4305. spin_lock_irq(&conf->device_lock);
  4306. if (!ok)
  4307. break;
  4308. handled++;
  4309. }
  4310. batch_size = handle_active_stripes(conf);
  4311. if (!batch_size)
  4312. break;
  4313. handled += batch_size;
  4314. if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
  4315. spin_unlock_irq(&conf->device_lock);
  4316. md_check_recovery(mddev);
  4317. spin_lock_irq(&conf->device_lock);
  4318. }
  4319. }
  4320. pr_debug("%d stripes handled\n", handled);
  4321. spin_unlock_irq(&conf->device_lock);
  4322. async_tx_issue_pending_all();
  4323. blk_finish_plug(&plug);
  4324. pr_debug("--- raid5d inactive\n");
  4325. }
  4326. static ssize_t
  4327. raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
  4328. {
  4329. struct r5conf *conf = mddev->private;
  4330. if (conf)
  4331. return sprintf(page, "%d\n", conf->max_nr_stripes);
  4332. else
  4333. return 0;
  4334. }
  4335. int
  4336. raid5_set_cache_size(struct mddev *mddev, int size)
  4337. {
  4338. struct r5conf *conf = mddev->private;
  4339. int err;
  4340. if (size <= 16 || size > 32768)
  4341. return -EINVAL;
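/* Shrink towards the new size one stripe at a time; drop_one_stripe()
 * fails once no inactive stripe is left to free, in which case we
 * stop early rather than wait.
 */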
  4342. while (size < conf->max_nr_stripes) {
  4343. if (drop_one_stripe(conf))
  4344. conf->max_nr_stripes--;
  4345. else
  4346. break;
  4347. }
  4348. err = md_allow_write(mddev);
  4349. if (err)
  4350. return err;
  4351. while (size > conf->max_nr_stripes) {
  4352. if (grow_one_stripe(conf))
  4353. conf->max_nr_stripes++;
  4354. else break;
  4355. }
  4356. return 0;
  4357. }
  4358. EXPORT_SYMBOL(raid5_set_cache_size);
  4359. static ssize_t
  4360. raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
  4361. {
  4362. struct r5conf *conf = mddev->private;
  4363. unsigned long new;
  4364. int err;
  4365. if (len >= PAGE_SIZE)
  4366. return -EINVAL;
  4367. if (!conf)
  4368. return -ENODEV;
  4369. if (strict_strtoul(page, 10, &new))
  4370. return -EINVAL;
  4371. err = raid5_set_cache_size(mddev, new);
  4372. if (err)
  4373. return err;
  4374. return len;
  4375. }
  4376. static struct md_sysfs_entry
  4377. raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
  4378. raid5_show_stripe_cache_size,
  4379. raid5_store_stripe_cache_size);
  4380. static ssize_t
  4381. raid5_show_preread_threshold(struct mddev *mddev, char *page)
  4382. {
  4383. struct r5conf *conf = mddev->private;
  4384. if (conf)
  4385. return sprintf(page, "%d\n", conf->bypass_threshold);
  4386. else
  4387. return 0;
  4388. }
  4389. static ssize_t
  4390. raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
  4391. {
  4392. struct r5conf *conf = mddev->private;
  4393. unsigned long new;
  4394. if (len >= PAGE_SIZE)
  4395. return -EINVAL;
  4396. if (!conf)
  4397. return -ENODEV;
  4398. if (strict_strtoul(page, 10, &new))
  4399. return -EINVAL;
  4400. if (new > conf->max_nr_stripes)
  4401. return -EINVAL;
  4402. conf->bypass_threshold = new;
  4403. return len;
  4404. }
  4405. static struct md_sysfs_entry
  4406. raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
  4407. S_IRUGO | S_IWUSR,
  4408. raid5_show_preread_threshold,
  4409. raid5_store_preread_threshold);
  4410. static ssize_t
  4411. stripe_cache_active_show(struct mddev *mddev, char *page)
  4412. {
  4413. struct r5conf *conf = mddev->private;
  4414. if (conf)
  4415. return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
  4416. else
  4417. return 0;
  4418. }
  4419. static struct md_sysfs_entry
  4420. raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
  4421. static struct attribute *raid5_attrs[] = {
  4422. &raid5_stripecache_size.attr,
  4423. &raid5_stripecache_active.attr,
  4424. &raid5_preread_bypass_threshold.attr,
  4425. NULL,
  4426. };
  4427. static struct attribute_group raid5_attrs_group = {
  4428. .name = NULL,
  4429. .attrs = raid5_attrs,
  4430. };
  4431. static sector_t
  4432. raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
  4433. {
  4434. struct r5conf *conf = mddev->private;
  4435. if (!sectors)
  4436. sectors = mddev->dev_sectors;
  4437. if (!raid_disks)
  4438. /* size is defined by the smallest of previous and new size */
  4439. raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
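/* Round the per-device size down to a multiple of both the old and
 * the new chunk size so the result stays valid across a reshape that
 * changes the chunk size.
 */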
  4440. sectors &= ~((sector_t)mddev->chunk_sectors - 1);
  4441. sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
  4442. return sectors * (raid_disks - conf->max_degraded);
  4443. }
  4444. static void raid5_free_percpu(struct r5conf *conf)
  4445. {
  4446. struct raid5_percpu *percpu;
  4447. unsigned long cpu;
  4448. if (!conf->percpu)
  4449. return;
  4450. get_online_cpus();
  4451. for_each_possible_cpu(cpu) {
  4452. percpu = per_cpu_ptr(conf->percpu, cpu);
  4453. safe_put_page(percpu->spare_page);
  4454. kfree(percpu->scribble);
  4455. }
  4456. #ifdef CONFIG_HOTPLUG_CPU
  4457. unregister_cpu_notifier(&conf->cpu_notify);
  4458. #endif
  4459. put_online_cpus();
  4460. free_percpu(conf->percpu);
  4461. }
  4462. static void free_conf(struct r5conf *conf)
  4463. {
  4464. shrink_stripes(conf);
  4465. raid5_free_percpu(conf);
  4466. kfree(conf->disks);
  4467. kfree(conf->stripe_hashtbl);
  4468. kfree(conf);
  4469. }
  4470. #ifdef CONFIG_HOTPLUG_CPU
  4471. static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
  4472. void *hcpu)
  4473. {
  4474. struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
  4475. long cpu = (long)hcpu;
  4476. struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
  4477. switch (action) {
  4478. case CPU_UP_PREPARE:
  4479. case CPU_UP_PREPARE_FROZEN:
  4480. if (conf->level == 6 && !percpu->spare_page)
  4481. percpu->spare_page = alloc_page(GFP_KERNEL);
  4482. if (!percpu->scribble)
  4483. percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
  4484. if (!percpu->scribble ||
  4485. (conf->level == 6 && !percpu->spare_page)) {
  4486. safe_put_page(percpu->spare_page);
  4487. kfree(percpu->scribble);
  4488. pr_err("%s: failed memory allocation for cpu%ld\n",
  4489. __func__, cpu);
  4490. return notifier_from_errno(-ENOMEM);
  4491. }
  4492. break;
  4493. case CPU_DEAD:
  4494. case CPU_DEAD_FROZEN:
  4495. safe_put_page(percpu->spare_page);
  4496. kfree(percpu->scribble);
  4497. percpu->spare_page = NULL;
  4498. percpu->scribble = NULL;
  4499. break;
  4500. default:
  4501. break;
  4502. }
  4503. return NOTIFY_OK;
  4504. }
  4505. #endif
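/* Allocate the per-cpu resources used when handling stripes: a
 * scribble buffer on every cpu and, for raid6, a spare page used while
 * checking P/Q.  With CONFIG_HOTPLUG_CPU a notifier is registered so
 * that cpus brought up later get their own copies.
 */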
  4506. static int raid5_alloc_percpu(struct r5conf *conf)
  4507. {
  4508. unsigned long cpu;
  4509. struct page *spare_page;
  4510. struct raid5_percpu __percpu *allcpus;
  4511. void *scribble;
  4512. int err;
  4513. allcpus = alloc_percpu(struct raid5_percpu);
  4514. if (!allcpus)
  4515. return -ENOMEM;
  4516. conf->percpu = allcpus;
  4517. get_online_cpus();
  4518. err = 0;
  4519. for_each_present_cpu(cpu) {
  4520. if (conf->level == 6) {
  4521. spare_page = alloc_page(GFP_KERNEL);
  4522. if (!spare_page) {
  4523. err = -ENOMEM;
  4524. break;
  4525. }
  4526. per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
  4527. }
  4528. scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
  4529. if (!scribble) {
  4530. err = -ENOMEM;
  4531. break;
  4532. }
  4533. per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
  4534. }
  4535. #ifdef CONFIG_HOTPLUG_CPU
  4536. conf->cpu_notify.notifier_call = raid456_cpu_notify;
  4537. conf->cpu_notify.priority = 0;
  4538. if (err == 0)
  4539. err = register_cpu_notifier(&conf->cpu_notify);
  4540. #endif
  4541. put_online_cpus();
  4542. return err;
  4543. }
  4544. static struct r5conf *setup_conf(struct mddev *mddev)
  4545. {
  4546. struct r5conf *conf;
  4547. int raid_disk, memory, max_disks;
  4548. struct md_rdev *rdev;
  4549. struct disk_info *disk;
  4550. char pers_name[6];
  4551. if (mddev->new_level != 5
  4552. && mddev->new_level != 4
  4553. && mddev->new_level != 6) {
  4554. printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
  4555. mdname(mddev), mddev->new_level);
  4556. return ERR_PTR(-EIO);
  4557. }
  4558. if ((mddev->new_level == 5
  4559. && !algorithm_valid_raid5(mddev->new_layout)) ||
  4560. (mddev->new_level == 6
  4561. && !algorithm_valid_raid6(mddev->new_layout))) {
  4562. printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
  4563. mdname(mddev), mddev->new_layout);
  4564. return ERR_PTR(-EIO);
  4565. }
  4566. if (mddev->new_level == 6 && mddev->raid_disks < 4) {
  4567. printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
  4568. mdname(mddev), mddev->raid_disks);
  4569. return ERR_PTR(-EINVAL);
  4570. }
  4571. if (!mddev->new_chunk_sectors ||
  4572. (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
  4573. !is_power_of_2(mddev->new_chunk_sectors)) {
  4574. printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
  4575. mdname(mddev), mddev->new_chunk_sectors << 9);
  4576. return ERR_PTR(-EINVAL);
  4577. }
  4578. conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
  4579. if (conf == NULL)
  4580. goto abort;
  4581. spin_lock_init(&conf->device_lock);
  4582. init_waitqueue_head(&conf->wait_for_stripe);
  4583. init_waitqueue_head(&conf->wait_for_overlap);
  4584. INIT_LIST_HEAD(&conf->handle_list);
  4585. INIT_LIST_HEAD(&conf->hold_list);
  4586. INIT_LIST_HEAD(&conf->delayed_list);
  4587. INIT_LIST_HEAD(&conf->bitmap_list);
  4588. INIT_LIST_HEAD(&conf->inactive_list);
  4589. atomic_set(&conf->active_stripes, 0);
  4590. atomic_set(&conf->preread_active_stripes, 0);
  4591. atomic_set(&conf->active_aligned_reads, 0);
  4592. conf->bypass_threshold = BYPASS_THRESHOLD;
  4593. conf->recovery_disabled = mddev->recovery_disabled - 1;
  4594. conf->raid_disks = mddev->raid_disks;
  4595. if (mddev->reshape_position == MaxSector)
  4596. conf->previous_raid_disks = mddev->raid_disks;
  4597. else
  4598. conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
  4599. max_disks = max(conf->raid_disks, conf->previous_raid_disks);
  4600. conf->scribble_len = scribble_len(max_disks);
  4601. conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
  4602. GFP_KERNEL);
  4603. if (!conf->disks)
  4604. goto abort;
  4605. conf->mddev = mddev;
  4606. if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
  4607. goto abort;
  4608. conf->level = mddev->new_level;
  4609. if (raid5_alloc_percpu(conf) != 0)
  4610. goto abort;
  4611. pr_debug("raid456: run(%s) called.\n", mdname(mddev));
  4612. rdev_for_each(rdev, mddev) {
  4613. raid_disk = rdev->raid_disk;
  4614. if (raid_disk >= max_disks
  4615. || raid_disk < 0)
  4616. continue;
  4617. disk = conf->disks + raid_disk;
  4618. if (test_bit(Replacement, &rdev->flags)) {
  4619. if (disk->replacement)
  4620. goto abort;
  4621. disk->replacement = rdev;
  4622. } else {
  4623. if (disk->rdev)
  4624. goto abort;
  4625. disk->rdev = rdev;
  4626. }
  4627. if (test_bit(In_sync, &rdev->flags)) {
  4628. char b[BDEVNAME_SIZE];
  4629. printk(KERN_INFO "md/raid:%s: device %s operational as raid"
  4630. " disk %d\n",
  4631. mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
  4632. } else if (rdev->saved_raid_disk != raid_disk)
  4633. /* Cannot rely on bitmap to complete recovery */
  4634. conf->fullsync = 1;
  4635. }
  4636. conf->chunk_sectors = mddev->new_chunk_sectors;
  4637. conf->level = mddev->new_level;
  4638. if (conf->level == 6)
  4639. conf->max_degraded = 2;
  4640. else
  4641. conf->max_degraded = 1;
  4642. conf->algorithm = mddev->new_layout;
  4643. conf->max_nr_stripes = NR_STRIPES;
  4644. conf->reshape_progress = mddev->reshape_position;
  4645. if (conf->reshape_progress != MaxSector) {
  4646. conf->prev_chunk_sectors = mddev->chunk_sectors;
  4647. conf->prev_algo = mddev->layout;
  4648. }
  4649. memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
  4650. max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
  4651. if (grow_stripes(conf, conf->max_nr_stripes)) {
  4652. printk(KERN_ERR
  4653. "md/raid:%s: couldn't allocate %dkB for buffers\n",
  4654. mdname(mddev), memory);
  4655. goto abort;
  4656. } else
  4657. printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
  4658. mdname(mddev), memory);
  4659. sprintf(pers_name, "raid%d", mddev->new_level);
  4660. conf->thread = md_register_thread(raid5d, mddev, pers_name);
  4661. if (!conf->thread) {
  4662. printk(KERN_ERR
  4663. "md/raid:%s: couldn't allocate thread.\n",
  4664. mdname(mddev));
  4665. goto abort;
  4666. }
  4667. return conf;
  4668. abort:
  4669. if (conf) {
  4670. free_conf(conf);
  4671. return ERR_PTR(-EIO);
  4672. } else
  4673. return ERR_PTR(-ENOMEM);
  4674. }
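/* Return 1 if the device in slot 'raid_disk' holds only parity blocks
 * in the given layout; run() uses this to decide whether a device that
 * is not fully recovered forces the array to be treated as dirty.
 */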
  4675. static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
  4676. {
  4677. switch (algo) {
  4678. case ALGORITHM_PARITY_0:
  4679. if (raid_disk < max_degraded)
  4680. return 1;
  4681. break;
  4682. case ALGORITHM_PARITY_N:
  4683. if (raid_disk >= raid_disks - max_degraded)
  4684. return 1;
  4685. break;
  4686. case ALGORITHM_PARITY_0_6:
  4687. if (raid_disk == 0 ||
  4688. raid_disk == raid_disks - 1)
  4689. return 1;
  4690. break;
  4691. case ALGORITHM_LEFT_ASYMMETRIC_6:
  4692. case ALGORITHM_RIGHT_ASYMMETRIC_6:
  4693. case ALGORITHM_LEFT_SYMMETRIC_6:
  4694. case ALGORITHM_RIGHT_SYMMETRIC_6:
  4695. if (raid_disk == raid_disks - 1)
  4696. return 1;
  4697. }
  4698. return 0;
  4699. }
  4700. static int run(struct mddev *mddev)
  4701. {
  4702. struct r5conf *conf;
  4703. int working_disks = 0;
  4704. int dirty_parity_disks = 0;
  4705. struct md_rdev *rdev;
  4706. sector_t reshape_offset = 0;
  4707. int i;
  4708. long long min_offset_diff = 0;
  4709. int first = 1;
  4710. if (mddev->recovery_cp != MaxSector)
  4711. printk(KERN_NOTICE "md/raid:%s: not clean"
  4712. " -- starting background reconstruction\n",
  4713. mdname(mddev));
  4714. rdev_for_each(rdev, mddev) {
  4715. long long diff;
  4716. if (rdev->raid_disk < 0)
  4717. continue;
  4718. diff = (rdev->new_data_offset - rdev->data_offset);
  4719. if (first) {
  4720. min_offset_diff = diff;
  4721. first = 0;
  4722. } else if (mddev->reshape_backwards &&
  4723. diff < min_offset_diff)
  4724. min_offset_diff = diff;
  4725. else if (!mddev->reshape_backwards &&
  4726. diff > min_offset_diff)
  4727. min_offset_diff = diff;
  4728. }
  4729. if (mddev->reshape_position != MaxSector) {
  4730. /* Check that we can continue the reshape.
  4731. * Difficulties arise if the stripe we would write to
  4732. * next is at or after the stripe we would read from next.
  4733. * For a reshape that changes the number of devices, this
  4734. * is only possible for a very short time, and mdadm makes
 * sure that time appears to have passed before assembling
 * the array. So we fail if that time hasn't passed.
 * For a reshape that keeps the number of devices the same
 * mdadm must be monitoring the reshape and keeping the
  4739. * critical areas read-only and backed up. It will start
  4740. * the array in read-only mode, so we check for that.
  4741. */
  4742. sector_t here_new, here_old;
  4743. int old_disks;
  4744. int max_degraded = (mddev->level == 6 ? 2 : 1);
  4745. if (mddev->new_level != mddev->level) {
  4746. printk(KERN_ERR "md/raid:%s: unsupported reshape "
  4747. "required - aborting.\n",
  4748. mdname(mddev));
  4749. return -EINVAL;
  4750. }
  4751. old_disks = mddev->raid_disks - mddev->delta_disks;
  4752. /* reshape_position must be on a new-stripe boundary, and one
  4753. * further up in new geometry must map after here in old
  4754. * geometry.
  4755. */
  4756. here_new = mddev->reshape_position;
  4757. if (sector_div(here_new, mddev->new_chunk_sectors *
  4758. (mddev->raid_disks - max_degraded))) {
  4759. printk(KERN_ERR "md/raid:%s: reshape_position not "
  4760. "on a stripe boundary\n", mdname(mddev));
  4761. return -EINVAL;
  4762. }
  4763. reshape_offset = here_new * mddev->new_chunk_sectors;
  4764. /* here_new is the stripe we will write to */
  4765. here_old = mddev->reshape_position;
  4766. sector_div(here_old, mddev->chunk_sectors *
  4767. (old_disks-max_degraded));
  4768. /* here_old is the first stripe that we might need to read
  4769. * from */
  4770. if (mddev->delta_disks == 0) {
  4771. if ((here_new * mddev->new_chunk_sectors !=
  4772. here_old * mddev->chunk_sectors)) {
  4773. printk(KERN_ERR "md/raid:%s: reshape position is"
  4774. " confused - aborting\n", mdname(mddev));
  4775. return -EINVAL;
  4776. }
  4777. /* We cannot be sure it is safe to start an in-place
  4778. * reshape. It is only safe if user-space is monitoring
  4779. * and taking constant backups.
  4780. * mdadm always starts a situation like this in
  4781. * readonly mode so it can take control before
  4782. * allowing any writes. So just check for that.
  4783. */
  4784. if (abs(min_offset_diff) >= mddev->chunk_sectors &&
  4785. abs(min_offset_diff) >= mddev->new_chunk_sectors)
  4786. /* not really in-place - so OK */;
  4787. else if (mddev->ro == 0) {
  4788. printk(KERN_ERR "md/raid:%s: in-place reshape "
  4789. "must be started in read-only mode "
  4790. "- aborting\n",
  4791. mdname(mddev));
  4792. return -EINVAL;
  4793. }
  4794. } else if (mddev->reshape_backwards
  4795. ? (here_new * mddev->new_chunk_sectors + min_offset_diff <=
  4796. here_old * mddev->chunk_sectors)
  4797. : (here_new * mddev->new_chunk_sectors >=
  4798. here_old * mddev->chunk_sectors + (-min_offset_diff))) {
  4799. /* Reading from the same stripe as writing to - bad */
  4800. printk(KERN_ERR "md/raid:%s: reshape_position too early for "
  4801. "auto-recovery - aborting.\n",
  4802. mdname(mddev));
  4803. return -EINVAL;
  4804. }
  4805. printk(KERN_INFO "md/raid:%s: reshape will continue\n",
  4806. mdname(mddev));
  4807. /* OK, we should be able to continue; */
  4808. } else {
  4809. BUG_ON(mddev->level != mddev->new_level);
  4810. BUG_ON(mddev->layout != mddev->new_layout);
  4811. BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
  4812. BUG_ON(mddev->delta_disks != 0);
  4813. }
  4814. if (mddev->private == NULL)
  4815. conf = setup_conf(mddev);
  4816. else
  4817. conf = mddev->private;
  4818. if (IS_ERR(conf))
  4819. return PTR_ERR(conf);
  4820. conf->min_offset_diff = min_offset_diff;
  4821. mddev->thread = conf->thread;
  4822. conf->thread = NULL;
  4823. mddev->private = conf;
  4824. for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
  4825. i++) {
  4826. rdev = conf->disks[i].rdev;
  4827. if (!rdev && conf->disks[i].replacement) {
  4828. /* The replacement is all we have yet */
  4829. rdev = conf->disks[i].replacement;
  4830. conf->disks[i].replacement = NULL;
  4831. clear_bit(Replacement, &rdev->flags);
  4832. conf->disks[i].rdev = rdev;
  4833. }
  4834. if (!rdev)
  4835. continue;
  4836. if (conf->disks[i].replacement &&
  4837. conf->reshape_progress != MaxSector) {
  4838. /* replacements and reshape simply do not mix. */
  4839. printk(KERN_ERR "md: cannot handle concurrent "
  4840. "replacement and reshape.\n");
  4841. goto abort;
  4842. }
  4843. if (test_bit(In_sync, &rdev->flags)) {
  4844. working_disks++;
  4845. continue;
  4846. }
/* This disk is not fully in-sync. However if it
 * just stored parity (beyond the recovery_offset),
 * then we don't need to be concerned about the
  4850. * array being dirty.
  4851. * When reshape goes 'backwards', we never have
  4852. * partially completed devices, so we only need
  4853. * to worry about reshape going forwards.
  4854. */
  4855. /* Hack because v0.91 doesn't store recovery_offset properly. */
  4856. if (mddev->major_version == 0 &&
  4857. mddev->minor_version > 90)
  4858. rdev->recovery_offset = reshape_offset;
  4859. if (rdev->recovery_offset < reshape_offset) {
  4860. /* We need to check old and new layout */
  4861. if (!only_parity(rdev->raid_disk,
  4862. conf->algorithm,
  4863. conf->raid_disks,
  4864. conf->max_degraded))
  4865. continue;
  4866. }
  4867. if (!only_parity(rdev->raid_disk,
  4868. conf->prev_algo,
  4869. conf->previous_raid_disks,
  4870. conf->max_degraded))
  4871. continue;
  4872. dirty_parity_disks++;
  4873. }
  4874. /*
  4875. * 0 for a fully functional array, 1 or 2 for a degraded array.
  4876. */
  4877. mddev->degraded = calc_degraded(conf);
  4878. if (has_failed(conf)) {
  4879. printk(KERN_ERR "md/raid:%s: not enough operational devices"
  4880. " (%d/%d failed)\n",
  4881. mdname(mddev), mddev->degraded, conf->raid_disks);
  4882. goto abort;
  4883. }
  4884. /* device size must be a multiple of chunk size */
  4885. mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
  4886. mddev->resync_max_sectors = mddev->dev_sectors;
  4887. if (mddev->degraded > dirty_parity_disks &&
  4888. mddev->recovery_cp != MaxSector) {
  4889. if (mddev->ok_start_degraded)
  4890. printk(KERN_WARNING
  4891. "md/raid:%s: starting dirty degraded array"
  4892. " - data corruption possible.\n",
  4893. mdname(mddev));
  4894. else {
  4895. printk(KERN_ERR
  4896. "md/raid:%s: cannot start dirty degraded array.\n",
  4897. mdname(mddev));
  4898. goto abort;
  4899. }
  4900. }
  4901. if (mddev->degraded == 0)
  4902. printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
  4903. " devices, algorithm %d\n", mdname(mddev), conf->level,
  4904. mddev->raid_disks-mddev->degraded, mddev->raid_disks,
  4905. mddev->new_layout);
  4906. else
  4907. printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
  4908. " out of %d devices, algorithm %d\n",
  4909. mdname(mddev), conf->level,
  4910. mddev->raid_disks - mddev->degraded,
  4911. mddev->raid_disks, mddev->new_layout);
  4912. print_raid5_conf(conf);
  4913. if (conf->reshape_progress != MaxSector) {
  4914. conf->reshape_safe = conf->reshape_progress;
  4915. atomic_set(&conf->reshape_stripes, 0);
  4916. clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  4917. clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
  4918. set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
  4919. set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  4920. mddev->sync_thread = md_register_thread(md_do_sync, mddev,
  4921. "reshape");
  4922. }
  4923. /* Ok, everything is just fine now */
  4924. if (mddev->to_remove == &raid5_attrs_group)
  4925. mddev->to_remove = NULL;
  4926. else if (mddev->kobj.sd &&
  4927. sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
  4928. printk(KERN_WARNING
  4929. "raid5: failed to create sysfs attributes for %s\n",
  4930. mdname(mddev));
  4931. md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
  4932. if (mddev->queue) {
  4933. int chunk_size;
  4934. bool discard_supported = true;
  4935. /* read-ahead size must cover two whole stripes, which
 * is 2 * (datadisks) * chunksize, where 'datadisks' is the
 * number of devices that hold data (raid devices minus parity)
  4938. */
  4939. int data_disks = conf->previous_raid_disks - conf->max_degraded;
  4940. int stripe = data_disks *
  4941. ((mddev->chunk_sectors << 9) / PAGE_SIZE);
  4942. if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
  4943. mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
  4944. blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
  4945. mddev->queue->backing_dev_info.congested_data = mddev;
  4946. mddev->queue->backing_dev_info.congested_fn = raid5_congested;
  4947. chunk_size = mddev->chunk_sectors << 9;
  4948. blk_queue_io_min(mddev->queue, chunk_size);
  4949. blk_queue_io_opt(mddev->queue, chunk_size *
  4950. (conf->raid_disks - conf->max_degraded));
  4951. /*
  4952. * We can only discard a whole stripe. It doesn't make sense to
 * discard the data disks but still write the parity disk
  4954. */
  4955. stripe = stripe * PAGE_SIZE;
  4956. /* Round up to power of 2, as discard handling
  4957. * currently assumes that */
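/* (stripe | (stripe - 1)) sets every bit below the top set bit, so
 * adding 1 gives the next power of two.
 */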
  4958. while ((stripe-1) & stripe)
  4959. stripe = (stripe | (stripe-1)) + 1;
  4960. mddev->queue->limits.discard_alignment = stripe;
  4961. mddev->queue->limits.discard_granularity = stripe;
  4962. /*
 * the unaligned part of a discard request will be ignored, so we
 * can't guarantee discard_zeroes_data
  4965. */
  4966. mddev->queue->limits.discard_zeroes_data = 0;
  4967. rdev_for_each(rdev, mddev) {
  4968. disk_stack_limits(mddev->gendisk, rdev->bdev,
  4969. rdev->data_offset << 9);
  4970. disk_stack_limits(mddev->gendisk, rdev->bdev,
  4971. rdev->new_data_offset << 9);
  4972. /*
  4973. * discard_zeroes_data is required, otherwise data
  4974. * could be lost. Consider a scenario: discard a stripe
  4975. * (the stripe could be inconsistent if
  4976. * discard_zeroes_data is 0); write one disk of the
  4977. * stripe (the stripe could be inconsistent again
  4978. * depending on which disks are used to calculate
 * parity); the disk is broken; the stripe data of this
  4980. * disk is lost.
  4981. */
  4982. if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) ||
  4983. !bdev_get_queue(rdev->bdev)->
  4984. limits.discard_zeroes_data)
  4985. discard_supported = false;
  4986. }
  4987. if (discard_supported &&
  4988. mddev->queue->limits.max_discard_sectors >= stripe &&
  4989. mddev->queue->limits.discard_granularity >= stripe)
  4990. queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
  4991. mddev->queue);
  4992. else
  4993. queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
  4994. mddev->queue);
  4995. }
  4996. return 0;
  4997. abort:
  4998. md_unregister_thread(&mddev->thread);
  4999. print_raid5_conf(conf);
  5000. free_conf(conf);
  5001. mddev->private = NULL;
  5002. printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
  5003. return -EIO;
  5004. }
  5005. static int stop(struct mddev *mddev)
  5006. {
  5007. struct r5conf *conf = mddev->private;
  5008. md_unregister_thread(&mddev->thread);
  5009. if (mddev->queue)
  5010. mddev->queue->backing_dev_info.congested_fn = NULL;
  5011. free_conf(conf);
  5012. mddev->private = NULL;
  5013. mddev->to_remove = &raid5_attrs_group;
  5014. return 0;
  5015. }
  5016. static void status(struct seq_file *seq, struct mddev *mddev)
  5017. {
  5018. struct r5conf *conf = mddev->private;
  5019. int i;
  5020. seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
  5021. mddev->chunk_sectors / 2, mddev->layout);
  5022. seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
  5023. for (i = 0; i < conf->raid_disks; i++)
  5024. seq_printf (seq, "%s",
  5025. conf->disks[i].rdev &&
  5026. test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
  5027. seq_printf (seq, "]");
  5028. }
  5029. static void print_raid5_conf (struct r5conf *conf)
  5030. {
  5031. int i;
  5032. struct disk_info *tmp;
  5033. printk(KERN_DEBUG "RAID conf printout:\n");
  5034. if (!conf) {
  5035. printk("(conf==NULL)\n");
  5036. return;
  5037. }
  5038. printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
  5039. conf->raid_disks,
  5040. conf->raid_disks - conf->mddev->degraded);
  5041. for (i = 0; i < conf->raid_disks; i++) {
  5042. char b[BDEVNAME_SIZE];
  5043. tmp = conf->disks + i;
  5044. if (tmp->rdev)
  5045. printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
  5046. i, !test_bit(Faulty, &tmp->rdev->flags),
  5047. bdevname(tmp->rdev->bdev, b));
  5048. }
  5049. }
  5050. static int raid5_spare_active(struct mddev *mddev)
  5051. {
  5052. int i;
  5053. struct r5conf *conf = mddev->private;
  5054. struct disk_info *tmp;
  5055. int count = 0;
  5056. unsigned long flags;
  5057. for (i = 0; i < conf->raid_disks; i++) {
  5058. tmp = conf->disks + i;
  5059. if (tmp->replacement
  5060. && tmp->replacement->recovery_offset == MaxSector
  5061. && !test_bit(Faulty, &tmp->replacement->flags)
  5062. && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
  5063. /* Replacement has just become active. */
  5064. if (!tmp->rdev
  5065. || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
  5066. count++;
  5067. if (tmp->rdev) {
  5068. /* Replaced device not technically faulty,
  5069. * but we need to be sure it gets removed
  5070. * and never re-added.
  5071. */
  5072. set_bit(Faulty, &tmp->rdev->flags);
  5073. sysfs_notify_dirent_safe(
  5074. tmp->rdev->sysfs_state);
  5075. }
  5076. sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
  5077. } else if (tmp->rdev
  5078. && tmp->rdev->recovery_offset == MaxSector
  5079. && !test_bit(Faulty, &tmp->rdev->flags)
  5080. && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
  5081. count++;
  5082. sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
  5083. }
  5084. }
  5085. spin_lock_irqsave(&conf->device_lock, flags);
  5086. mddev->degraded = calc_degraded(conf);
  5087. spin_unlock_irqrestore(&conf->device_lock, flags);
  5088. print_raid5_conf(conf);
  5089. return count;
  5090. }
  5091. static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
  5092. {
  5093. struct r5conf *conf = mddev->private;
  5094. int err = 0;
  5095. int number = rdev->raid_disk;
  5096. struct md_rdev **rdevp;
  5097. struct disk_info *p = conf->disks + number;
  5098. print_raid5_conf(conf);
  5099. if (rdev == p->rdev)
  5100. rdevp = &p->rdev;
  5101. else if (rdev == p->replacement)
  5102. rdevp = &p->replacement;
  5103. else
  5104. return 0;
  5105. if (number >= conf->raid_disks &&
  5106. conf->reshape_progress == MaxSector)
  5107. clear_bit(In_sync, &rdev->flags);
  5108. if (test_bit(In_sync, &rdev->flags) ||
  5109. atomic_read(&rdev->nr_pending)) {
  5110. err = -EBUSY;
  5111. goto abort;
  5112. }
  5113. /* Only remove non-faulty devices if recovery
  5114. * isn't possible.
  5115. */
  5116. if (!test_bit(Faulty, &rdev->flags) &&
  5117. mddev->recovery_disabled != conf->recovery_disabled &&
  5118. !has_failed(conf) &&
  5119. (!p->replacement || p->replacement == rdev) &&
  5120. number < conf->raid_disks) {
  5121. err = -EBUSY;
  5122. goto abort;
  5123. }
  5124. *rdevp = NULL;
  5125. synchronize_rcu();
  5126. if (atomic_read(&rdev->nr_pending)) {
  5127. /* lost the race, try later */
  5128. err = -EBUSY;
  5129. *rdevp = rdev;
  5130. } else if (p->replacement) {
  5131. /* We must have just cleared 'rdev' */
  5132. p->rdev = p->replacement;
  5133. clear_bit(Replacement, &p->replacement->flags);
  5134. smp_mb(); /* Make sure other CPUs may see both as identical
  5135. * but will never see neither - if they are careful
  5136. */
  5137. p->replacement = NULL;
  5138. clear_bit(WantReplacement, &rdev->flags);
  5139. } else
/* We might have just removed the Replacement as faulty -
  5141. * clear the bit just in case
  5142. */
  5143. clear_bit(WantReplacement, &rdev->flags);
  5144. abort:
  5145. print_raid5_conf(conf);
  5146. return err;
  5147. }
  5148. static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
  5149. {
  5150. struct r5conf *conf = mddev->private;
  5151. int err = -EEXIST;
  5152. int disk;
  5153. struct disk_info *p;
  5154. int first = 0;
  5155. int last = conf->raid_disks - 1;
  5156. if (mddev->recovery_disabled == conf->recovery_disabled)
  5157. return -EBUSY;
  5158. if (rdev->saved_raid_disk < 0 && has_failed(conf))
  5159. /* no point adding a device */
  5160. return -EINVAL;
  5161. if (rdev->raid_disk >= 0)
  5162. first = last = rdev->raid_disk;
  5163. /*
  5164. * find the disk ... but prefer rdev->saved_raid_disk
  5165. * if possible.
  5166. */
  5167. if (rdev->saved_raid_disk >= 0 &&
  5168. rdev->saved_raid_disk >= first &&
  5169. conf->disks[rdev->saved_raid_disk].rdev == NULL)
  5170. first = rdev->saved_raid_disk;
  5171. for (disk = first; disk <= last; disk++) {
  5172. p = conf->disks + disk;
  5173. if (p->rdev == NULL) {
  5174. clear_bit(In_sync, &rdev->flags);
  5175. rdev->raid_disk = disk;
  5176. err = 0;
  5177. if (rdev->saved_raid_disk != disk)
  5178. conf->fullsync = 1;
  5179. rcu_assign_pointer(p->rdev, rdev);
  5180. goto out;
  5181. }
  5182. }
  5183. for (disk = first; disk <= last; disk++) {
  5184. p = conf->disks + disk;
  5185. if (test_bit(WantReplacement, &p->rdev->flags) &&
  5186. p->replacement == NULL) {
  5187. clear_bit(In_sync, &rdev->flags);
  5188. set_bit(Replacement, &rdev->flags);
  5189. rdev->raid_disk = disk;
  5190. err = 0;
  5191. conf->fullsync = 1;
  5192. rcu_assign_pointer(p->replacement, rdev);
  5193. break;
  5194. }
  5195. }
  5196. out:
  5197. print_raid5_conf(conf);
  5198. return err;
  5199. }
  5200. static int raid5_resize(struct mddev *mddev, sector_t sectors)
  5201. {
  5202. /* no resync is happening, and there is enough space
  5203. * on all devices, so we can resize.
  5204. * We need to make sure resync covers any new space.
  5205. * If the array is shrinking we should possibly wait until
  5206. * any io in the removed space completes, but it hardly seems
  5207. * worth it.
  5208. */
  5209. sector_t newsize;
  5210. sectors &= ~((sector_t)mddev->chunk_sectors - 1);
  5211. newsize = raid5_size(mddev, sectors, mddev->raid_disks);
  5212. if (mddev->external_size &&
  5213. mddev->array_sectors > newsize)
  5214. return -EINVAL;
  5215. if (mddev->bitmap) {
  5216. int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
  5217. if (ret)
  5218. return ret;
  5219. }
  5220. md_set_array_sectors(mddev, newsize);
  5221. set_capacity(mddev->gendisk, mddev->array_sectors);
  5222. revalidate_disk(mddev->gendisk);
  5223. if (sectors > mddev->dev_sectors &&
  5224. mddev->recovery_cp > mddev->dev_sectors) {
  5225. mddev->recovery_cp = mddev->dev_sectors;
  5226. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  5227. }
  5228. mddev->dev_sectors = sectors;
  5229. mddev->resync_max_sectors = sectors;
  5230. return 0;
  5231. }
  5232. static int check_stripe_cache(struct mddev *mddev)
  5233. {
  5234. /* Can only proceed if there are plenty of stripe_heads.
 * We need a minimum of one full stripe, and for sensible progress
  5236. * it is best to have about 4 times that.
  5237. * If we require 4 times, then the default 256 4K stripe_heads will
  5238. * allow for chunk sizes up to 256K, which is probably OK.
  5239. * If the chunk size is greater, user-space should request more
  5240. * stripe_heads first.
  5241. */
  5242. struct r5conf *conf = mddev->private;
  5243. if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
  5244. > conf->max_nr_stripes ||
  5245. ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
  5246. > conf->max_nr_stripes) {
  5247. printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
  5248. mdname(mddev),
  5249. ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
  5250. / STRIPE_SIZE)*4);
  5251. return 0;
  5252. }
  5253. return 1;
  5254. }
  5255. static int check_reshape(struct mddev *mddev)
  5256. {
  5257. struct r5conf *conf = mddev->private;
  5258. if (mddev->delta_disks == 0 &&
  5259. mddev->new_layout == mddev->layout &&
  5260. mddev->new_chunk_sectors == mddev->chunk_sectors)
  5261. return 0; /* nothing to do */
  5262. if (has_failed(conf))
  5263. return -EINVAL;
  5264. if (mddev->delta_disks < 0) {
  5265. /* We might be able to shrink, but the devices must
  5266. * be made bigger first.
  5267. * For raid6, 4 is the minimum size.
  5268. * Otherwise 2 is the minimum
  5269. */
  5270. int min = 2;
  5271. if (mddev->level == 6)
  5272. min = 4;
  5273. if (mddev->raid_disks + mddev->delta_disks < min)
  5274. return -EINVAL;
  5275. }
  5276. if (!check_stripe_cache(mddev))
  5277. return -ENOSPC;
  5278. return resize_stripes(conf, (conf->previous_raid_disks
  5279. + mddev->delta_disks));
  5280. }
  5281. static int raid5_start_reshape(struct mddev *mddev)
  5282. {
  5283. struct r5conf *conf = mddev->private;
  5284. struct md_rdev *rdev;
  5285. int spares = 0;
  5286. unsigned long flags;
  5287. if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
  5288. return -EBUSY;
  5289. if (!check_stripe_cache(mddev))
  5290. return -ENOSPC;
  5291. if (has_failed(conf))
  5292. return -EINVAL;
  5293. rdev_for_each(rdev, mddev) {
  5294. if (!test_bit(In_sync, &rdev->flags)
  5295. && !test_bit(Faulty, &rdev->flags))
  5296. spares++;
  5297. }
  5298. if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
  5299. /* Not enough devices even to make a degraded array
  5300. * of that size
  5301. */
  5302. return -EINVAL;
  5303. /* Refuse to reduce size of the array. Any reductions in
  5304. * array size must be through explicit setting of array_size
  5305. * attribute.
  5306. */
  5307. if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
  5308. < mddev->array_sectors) {
  5309. printk(KERN_ERR "md/raid:%s: array size must be reduced "
  5310. "before number of disks\n", mdname(mddev));
  5311. return -EINVAL;
  5312. }
  5313. atomic_set(&conf->reshape_stripes, 0);
  5314. spin_lock_irq(&conf->device_lock);
  5315. conf->previous_raid_disks = conf->raid_disks;
  5316. conf->raid_disks += mddev->delta_disks;
  5317. conf->prev_chunk_sectors = conf->chunk_sectors;
  5318. conf->chunk_sectors = mddev->new_chunk_sectors;
  5319. conf->prev_algo = conf->algorithm;
  5320. conf->algorithm = mddev->new_layout;
  5321. conf->generation++;
  5322. /* Code that selects data_offset needs to see the generation update
  5323. * if reshape_progress has been set - so a memory barrier needed.
  5324. */
  5325. smp_mb();
  5326. if (mddev->reshape_backwards)
  5327. conf->reshape_progress = raid5_size(mddev, 0, 0);
  5328. else
  5329. conf->reshape_progress = 0;
  5330. conf->reshape_safe = conf->reshape_progress;
  5331. spin_unlock_irq(&conf->device_lock);
  5332. /* Add some new drives, as many as will fit.
  5333. * We know there are enough to make the newly sized array work.
  5334. * Don't add devices if we are reducing the number of
  5335. * devices in the array. This is because it is not possible
  5336. * to correctly record the "partially reconstructed" state of
  5337. * such devices during the reshape and confusion could result.
  5338. */
  5339. if (mddev->delta_disks >= 0) {
  5340. rdev_for_each(rdev, mddev)
  5341. if (rdev->raid_disk < 0 &&
  5342. !test_bit(Faulty, &rdev->flags)) {
  5343. if (raid5_add_disk(mddev, rdev) == 0) {
  5344. if (rdev->raid_disk
  5345. >= conf->previous_raid_disks)
  5346. set_bit(In_sync, &rdev->flags);
  5347. else
  5348. rdev->recovery_offset = 0;
  5349. if (sysfs_link_rdev(mddev, rdev))
  5350. /* Failure here is OK */;
  5351. }
  5352. } else if (rdev->raid_disk >= conf->previous_raid_disks
  5353. && !test_bit(Faulty, &rdev->flags)) {
  5354. /* This is a spare that was manually added */
  5355. set_bit(In_sync, &rdev->flags);
  5356. }
  5357. /* When a reshape changes the number of devices,
  5358. * ->degraded is measured against the larger of the
  5359. * pre and post number of devices.
  5360. */
  5361. spin_lock_irqsave(&conf->device_lock, flags);
  5362. mddev->degraded = calc_degraded(conf);
  5363. spin_unlock_irqrestore(&conf->device_lock, flags);
  5364. }
  5365. mddev->raid_disks = conf->raid_disks;
  5366. mddev->reshape_position = conf->reshape_progress;
  5367. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  5368. clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  5369. clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
  5370. set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
  5371. set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  5372. mddev->sync_thread = md_register_thread(md_do_sync, mddev,
  5373. "reshape");
  5374. if (!mddev->sync_thread) {
  5375. mddev->recovery = 0;
  5376. spin_lock_irq(&conf->device_lock);
  5377. mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
  5378. rdev_for_each(rdev, mddev)
  5379. rdev->new_data_offset = rdev->data_offset;
  5380. smp_wmb();
  5381. conf->reshape_progress = MaxSector;
  5382. mddev->reshape_position = MaxSector;
  5383. spin_unlock_irq(&conf->device_lock);
  5384. return -EAGAIN;
  5385. }
  5386. conf->reshape_checkpoint = jiffies;
  5387. md_wakeup_thread(mddev->sync_thread);
  5388. md_new_event(mddev);
  5389. return 0;
  5390. }
  5391. /* This is called from the reshape thread and should make any
  5392. * changes needed in 'conf'
  5393. */
  5394. static void end_reshape(struct r5conf *conf)
  5395. {
  5396. if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
  5397. struct md_rdev *rdev;
  5398. spin_lock_irq(&conf->device_lock);
  5399. conf->previous_raid_disks = conf->raid_disks;
  5400. rdev_for_each(rdev, conf->mddev)
  5401. rdev->data_offset = rdev->new_data_offset;
  5402. smp_wmb();
  5403. conf->reshape_progress = MaxSector;
  5404. spin_unlock_irq(&conf->device_lock);
  5405. wake_up(&conf->wait_for_overlap);
  5406. /* read-ahead size must cover two whole stripes, which is
 * 2 * (datadisks) * chunksize, where 'datadisks' is the number of
 * devices that hold data (raid devices minus parity)
  5408. */
  5409. if (conf->mddev->queue) {
  5410. int data_disks = conf->raid_disks - conf->max_degraded;
  5411. int stripe = data_disks * ((conf->chunk_sectors << 9)
  5412. / PAGE_SIZE);
  5413. if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
  5414. conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
  5415. }
  5416. }
  5417. }

/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {

		if (mddev->delta_disks > 0) {
			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
			set_capacity(mddev->gendisk, mddev->array_sectors);
			revalidate_disk(mddev->gendisk);
		} else {
			int d;
			spin_lock_irq(&conf->device_lock);
			mddev->degraded = calc_degraded(conf);
			spin_unlock_irq(&conf->device_lock);
			for (d = conf->raid_disks;
			     d < conf->raid_disks - mddev->delta_disks;
			     d++) {
				struct md_rdev *rdev = conf->disks[d].rdev;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
				rdev = conf->disks[d].replacement;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
			}
		}
		mddev->layout = conf->algorithm;
		mddev->chunk_sectors = conf->chunk_sectors;
		mddev->reshape_position = MaxSector;
		mddev->delta_disks = 0;
		mddev->reshape_backwards = 0;
	}
}
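
/* Quiesce/resume hook used by the md core.
 * state 1: block new writes and wait until all active stripes and
 *          aligned reads have drained; conf->quiesce is briefly set
 *          to 2 so a running resync/reshape pauses while we wait.
 * state 0: re-enable writes and wake up anything that was waiting.
 * state 2: just wake up waiters so a paused resync/reshape can resume.
 */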
static void raid5_quiesce(struct mddev *mddev, int state)
{
	struct r5conf *conf = mddev->private;

	switch(state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		/* '2' tells resync/reshape to pause so that all
		 * active stripes can drain
		 */
		conf->quiesce = 2;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0 &&
				    atomic_read(&conf->active_aligned_reads) == 0,
				    conf->device_lock);
		conf->quiesce = 1;
		spin_unlock_irq(&conf->device_lock);
		/* allow reshape to continue */
		wake_up(&conf->wait_for_overlap);
		break;

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		wake_up(&conf->wait_for_overlap);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}
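
/* Convert a single-zone raid0 array to a raid4 or raid5 layout:
 * the existing members become the data disks and raid_disks is grown
 * by one to make room for the parity disk (ALGORITHM_PARITY_N).
 * recovery_cp is set to MaxSector so the array is not treated as dirty.
 */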
static void *raid45_takeover_raid0(struct mddev *mddev, int level)
{
	struct r0conf *raid0_conf = mddev->private;
	sector_t sectors;

	/* for raid0 takeover only one zone is supported */
	if (raid0_conf->nr_strip_zones > 1) {
		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	sectors = raid0_conf->strip_zone[0].zone_end;
	sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
	mddev->dev_sectors = sectors;
	mddev->new_level = level;
	mddev->new_layout = ALGORITHM_PARITY_N;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks += 1;
	mddev->delta_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	return setup_conf(mddev);
}
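
/* Convert a 2-drive raid1 into raid5: pick the largest power-of-two
 * chunk size (at most 64K) that divides the array size and use the
 * default left-symmetric layout.
 */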
static void *raid5_takeover_raid1(struct mddev *mddev)
{
	int chunksect;

	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);

	/* Should check if there are write-behind devices? */

	chunksect = 64*2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect-1)))
		chunksect >>= 1;

	if ((chunksect<<9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk_sectors = chunksect;

	return setup_conf(mddev);
}
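
/* Convert a raid6 array using one of the *_6 layouts (Q block always on
 * the last device) to the corresponding raid5 layout, dropping the
 * trailing Q device from raid_disks.
 */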
static void *raid5_takeover_raid6(struct mddev *mddev)
{
	int new_layout;

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
		break;
	case ALGORITHM_LEFT_SYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
		break;
	case ALGORITHM_PARITY_0_6:
		new_layout = ALGORITHM_PARITY_0;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
}

static int raid5_check_reshape(struct mddev *mddev)
{
	/* For a 2-drive array, the layout and chunk size can be changed
	 * immediately as no restriping is needed.
	 * For larger arrays we record the new value - after validation
	 * to be used by a reshape pass.
	 */
	struct r5conf *conf = mddev->private;
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE>>9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */

	if (mddev->raid_disks == 2) {
		/* can make the change immediately */
		if (mddev->new_layout >= 0) {
			conf->algorithm = mddev->new_layout;
			mddev->layout = mddev->new_layout;
		}
		if (new_chunk > 0) {
			conf->chunk_sectors = new_chunk;
			mddev->chunk_sectors = new_chunk;
		}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return check_reshape(mddev);
}
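
/* Validate a requested layout/chunk-size change for raid6.  Unlike
 * raid5 there is no 2-drive fast path; valid requests are simply
 * handed on to check_reshape().
 */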
static int raid6_check_reshape(struct mddev *mddev)
{
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE >> 9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */
	return check_reshape(mddev);
}

static void *raid5_takeover(struct mddev *mddev)
{
	/* raid5 can take over:
	 *  raid0 - if there is only one strip zone - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - Providing it is a *_6 layout
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 5);
	if (mddev->level == 1)
		return raid5_takeover_raid1(mddev);
	if (mddev->level == 4) {
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	}
	if (mddev->level == 6)
		return raid5_takeover_raid6(mddev);

	return ERR_PTR(-EINVAL);
}

static void *raid4_takeover(struct mddev *mddev)
{
	/* raid4 can take over:
	 *  raid0 - if there is only one strip zone
	 *  raid5 - if layout is right
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 4);
	if (mddev->level == 5 &&
	    mddev->layout == ALGORITHM_PARITY_N) {
		mddev->new_layout = 0;
		mddev->new_level = 4;
		return setup_conf(mddev);
	}
	return ERR_PTR(-EINVAL);
}
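
/* raid6_takeover() below needs to check whether the array is currently
 * run by the raid5 personality, which is defined further down, so a
 * forward declaration is needed here.
 */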
static struct md_personality raid5_personality;

static void *raid6_takeover(struct mddev *mddev)
{
	/* Currently can only take over a raid5.  We map the
	 * personality to an equivalent raid6 personality
	 * with the Q block at the end.
	 */
	int new_layout;

	if (mddev->pers != &raid5_personality)
		return ERR_PTR(-EINVAL);
	if (mddev->degraded > 1)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks > 253)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks < 3)
		return ERR_PTR(-EINVAL);

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
		break;
	case ALGORITHM_LEFT_SYMMETRIC:
		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
		break;
	case ALGORITHM_PARITY_0:
		new_layout = ALGORITHM_PARITY_0_6;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 6;
	mddev->new_layout = new_layout;
	mddev->delta_disks = 1;
	mddev->raid_disks += 1;
	return setup_conf(mddev);
}
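
/* The md personality tables below provide the entry points the md core
 * calls for raid4, raid5 and raid6 arrays; they are registered in
 * raid5_init().  The ->takeover() hooks end up being invoked when the
 * array level is changed from user space (for example via
 * "mdadm --grow --level=...").
 */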
static struct md_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid6_check_reshape,
	.start_reshape	= raid5_start_reshape,
	.finish_reshape	= raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid6_takeover,
};

static struct md_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
	.finish_reshape	= raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid5_takeover,
};

static struct md_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
	.finish_reshape	= raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid4_takeover,
};
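
/* Module init/exit: register and unregister all three personalities
 * with the md core.
 */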
static int __init raid5_init(void)
{
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");