/*
 * raid5.c : Multiple Devices driver for Linux
 *	Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	Copyright (C) 1999, 2000 Ingo Molnar
 *	Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment seq_flush, thus closing the current
 *   batch.
 * When we notice that seq_flush > seq_write, we write out all pending updates
 * to the bitmap, and advance seq_write to where seq_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */
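
/*
 * Illustrative timeline (numbers invented, single batch in flight):
 *   seq_flush == seq_write == 5: batch 5 is on disk, batch 6 is open.
 *   add_stripe_bio dirties a stripe  ->  sh->bm_seq = 6.
 *   The write must wait (bm_seq - seq_write > 0), so the stripe sits
 *   on bitmap_list until an unplug closes batch 6 (seq_flush = 6),
 *   the pending bitmap updates are written, and seq_write advances
 *   to 6, letting the stripe proceed.
 */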

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <trace/events/block.h>

#include "md.h"
#include "raid5.h"
#include "raid0.h"
#include "bitmap.h"

/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
	int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
	return &conf->stripe_hashtbl[hash];
}
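
/*
 * Example (assuming 4K pages and 64-bit pointers): STRIPE_SECTORS == 8,
 * STRIPE_SHIFT == 3 and NR_HASH == 512, so stripes that are 8 sectors
 * apart land in consecutive hash buckets and the table wraps around
 * every 512 * 8 sectors.
 */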

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
	int sectors = bio_sectors(bio);
	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
		return bio->bi_next;
	else
		return NULL;
}

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_processed_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return (atomic_read(segments) >> 16) & 0xffff;
}

static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return atomic_sub_return(1, segments) & 0xffff;
}

static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_inc(segments);
}

static inline void raid5_set_bi_processed_stripes(struct bio *bio,
	unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	int old, new;

	do {
		old = atomic_read(segments);
		new = (old & 0xffff) | (cnt << 16);
	} while (atomic_cmpxchg(segments, old, new) != old);
}

static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_set(segments, cnt);
}
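
/*
 * A sketch of how bi_phys_segments is borrowed by the helpers above:
 *
 *   31             16 15              0
 *  +-----------------+-----------------+
 *  | processed       | active stripe   |
 *  | stripe count    | reference count |
 *  +-----------------+-----------------+
 *
 * The low half is a reference count, so raid5_dec_bi_active_stripes()
 * returning 0 means no stripe still holds a reference to the bio.
 */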

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}

static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}
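
/*
 * Example (md layout, assuming 6 devices with pd_idx == 4 and
 * qd_idx == 5): the walk starts at raid6_d0() == 0 and visits
 * devices 0,1,2,3,4,5, which map to slots 0,1,2,3 for data,
 * slot 4 (syndrome_disks) for P and slot 5 (syndrome_disks + 1)
 * for Q.
 */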

static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;

	while (bi) {
		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bio_endio(bi, 0);
		bi = return_bi;
	}
}

static void print_raid5_conf(struct r5conf *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	BUG_ON(!list_empty(&sh->lru));
	BUG_ON(atomic_read(&conf->active_stripes) == 0);
	if (test_bit(STRIPE_HANDLE, &sh->state)) {
		if (test_bit(STRIPE_DELAYED, &sh->state) &&
		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			list_add_tail(&sh->lru, &conf->delayed_list);
		else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
			 sh->bm_seq - conf->seq_write > 0)
			list_add_tail(&sh->lru, &conf->bitmap_list);
		else {
			clear_bit(STRIPE_DELAYED, &sh->state);
			clear_bit(STRIPE_BIT_DELAY, &sh->state);
			list_add_tail(&sh->lru, &conf->handle_list);
		}
		md_wakeup_thread(conf->mddev->thread);
	} else {
		BUG_ON(stripe_operations_active(sh));
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			if (atomic_dec_return(&conf->preread_active_stripes)
			    < IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		atomic_dec(&conf->active_stripes);
		if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
			list_add_tail(&sh->lru, &conf->inactive_list);
			wake_up(&conf->wait_for_stripe);
			if (conf->retry_read_aligned)
				md_wakeup_thread(conf->mddev->thread);
		}
	}
}

static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count))
		do_release_stripe(conf, sh);
}

static void release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	unsigned long flags;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
		do_release_stripe(conf, sh);
		spin_unlock(&conf->device_lock);
	}
	local_irq_restore(flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_add_head(&sh->hash, hp);
}

/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(struct r5conf *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}

static void shrink_buffers(struct stripe_head *sh)
{
	struct page *p;
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh)
{
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}

static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
			   struct stripe_head *sh);

static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			WARN_ON(1);
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	insert_hash(conf, sh);
}

static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;

	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}

/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be in_sync in the section most affected by failed devices.
 */
static int calc_degraded(struct r5conf *conf)
{
	int degraded, degraded2;
	int i;

	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->previous_raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If the reshape increases the number of devices,
			 * this is being recovered by the reshape, so
			 * this 'previous' section is not in_sync.
			 * If the number of devices is being reduced however,
			 * the device can only be part of the array if
			 * we are reverting a reshape, so this section will
			 * be in-sync.
			 */
			if (conf->raid_disks >= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (conf->raid_disks == conf->previous_raid_disks)
		return degraded;
	rcu_read_lock();
	degraded2 = 0;
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded2++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If reshape increases the number of devices, this
			 * section has already been recovered, else it
			 * almost certainly hasn't.
			 */
			if (conf->raid_disks <= conf->previous_raid_disks)
				degraded2++;
	}
	rcu_read_unlock();
	if (degraded2 > degraded)
		return degraded2;
	return degraded;
}
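
/*
 * Note: during a reshape the two passes above can disagree; returning
 * the larger of the two counts is deliberately pessimistic, which is
 * what has_failed() below relies on.
 */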
static int has_failed(struct r5conf *conf)
{
	int degraded;

	if (conf->mddev->reshape_position == MaxSector)
		return conf->mddev->degraded > conf->max_degraded;

	degraded = calc_degraded(conf);
	if (degraded > conf->max_degraded)
		return 1;
	return 0;
}

static struct stripe_head *
get_active_stripe(struct r5conf *conf, sector_t sector,
		  int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0 || noquiesce,
				    conf->device_lock);
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes * 3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, previous);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru)
				    && !test_bit(STRIPE_EXPANDING, &sh->state)
				    && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}

/* Determine if 'data_offset' or 'new_data_offset' should be used
 * in this stripe_head.
 */
static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
{
	sector_t progress = conf->reshape_progress;
	/* Need a memory barrier to make sure we see the value
	 * of conf->generation, or ->data_offset that was set before
	 * reshape_progress was updated.
	 */
	smp_rmb();
	if (progress == MaxSector)
		return 0;
	if (sh->generation == conf->generation - 1)
		return 0;
	/* We are in a reshape, and this is a new-generation stripe,
	 * so use new_data_offset.
	 */
	return 1;
}
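
/*
 * In short: a stripe created before the reshape front passed it
 * (generation - 1) still lives in the old layout and keeps
 * data_offset; a current-generation stripe inside an active reshape
 * has already been relocated and must use new_data_offset.
 */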

static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);

static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	struct r5conf *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		int replace_only = 0;
		struct bio *bi, *rbi;
		struct md_rdev *rdev, *rrdev = NULL;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
				rw = WRITE_FUA;
			else
				rw = WRITE;
			if (test_bit(R5_Discard, &sh->dev[i].flags))
				rw |= REQ_DISCARD;
		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else if (test_and_clear_bit(R5_WantReplace,
					    &sh->dev[i].flags)) {
			rw = WRITE;
			replace_only = 1;
		} else
			continue;
		if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
			rw |= REQ_SYNC;

		bi = &sh->dev[i].req;
		rbi = &sh->dev[i].rreq; /* For writing to replacement */

		rcu_read_lock();
		rrdev = rcu_dereference(conf->disks[i].replacement);
		smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev) {
			rdev = rrdev;
			rrdev = NULL;
		}
		if (rw & WRITE) {
			if (replace_only)
				rdev = NULL;
			if (rdev == rrdev)
				/* We raced and saw duplicates */
				rrdev = NULL;
		} else {
			if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
				rdev = rrdev;
			rrdev = NULL;
		}

		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		if (rrdev && test_bit(Faulty, &rrdev->flags))
			rrdev = NULL;
		if (rrdev)
			atomic_inc(&rrdev->nr_pending);
		rcu_read_unlock();

		/* We have already checked bad blocks for reads.  Now
		 * need to check for writes.  We never accept write errors
		 * on the replacement, so we don't need to check rrdev.
		 */
		while ((rw & WRITE) && rdev &&
		       test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
					      &first_bad, &bad_sectors);
			if (!bad)
				break;

			if (bad < 0) {
				set_bit(BlockedBadBlocks, &rdev->flags);
				if (!conf->mddev->external &&
				    conf->mddev->flags) {
					/* It is very unlikely, but we might
					 * still need to write out the
					 * bad block log - better give it
					 * a chance*/
					md_check_recovery(conf->mddev);
				}
				/*
				 * Because md_wait_for_blocked_rdev
				 * will dec nr_pending, we must
				 * increment it first.
				 */
				atomic_inc(&rdev->nr_pending);
				md_wait_for_blocked_rdev(rdev, conf->mddev);
			} else {
				/* Acknowledged bad block - skip the write */
				rdev_dec_pending(rdev, conf->mddev);
				rdev = NULL;
			}
		}

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded
			    || s->replacing)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bio_reset(bi);
			bi->bi_bdev = rdev->bdev;
			bi->bi_rw = rw;
			bi->bi_end_io = (rw & WRITE)
				? raid5_end_write_request
				: raid5_end_read_request;
			bi->bi_private = sh;

			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			if (use_new_offset(conf, sh))
				bi->bi_sector = (sh->sector
						 + rdev->new_data_offset);
			else
				bi->bi_sector = (sh->sector
						 + rdev->data_offset);
			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
				bi->bi_rw |= REQ_FLUSH;

			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			if (rrdev)
				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
			trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
					      bi, disk_devt(conf->mddev->gendisk),
					      sh->dev[i].sector);
			generic_make_request(bi);
		}
		if (rrdev) {
			if (s->syncing || s->expanding || s->expanded
			    || s->replacing)
				md_sync_acct(rrdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bio_reset(rbi);
			rbi->bi_bdev = rrdev->bdev;
			rbi->bi_rw = rw;
			BUG_ON(!(rw & WRITE));
			rbi->bi_end_io = raid5_end_write_request;
			rbi->bi_private = sh;

			pr_debug("%s: for %llu schedule op %ld on "
				 "replacement disc %d\n",
				__func__, (unsigned long long)sh->sector,
				rbi->bi_rw, i);
			atomic_inc(&sh->count);
			if (use_new_offset(conf, sh))
				rbi->bi_sector = (sh->sector
						  + rrdev->new_data_offset);
			else
				rbi->bi_sector = (sh->sector
						  + rrdev->data_offset);
			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			rbi->bi_io_vec[0].bv_offset = 0;
			rbi->bi_size = STRIPE_SIZE;
			trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
					      rbi, disk_devt(conf->mddev->gendisk),
					      sh->dev[i].sector);
			generic_make_request(rbi);
		}
		if (!rdev && !rrdev) {
			if (rw & WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				 bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
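
/*
 * Note: each loop iteration above may issue I/O to the main rdev, to
 * the replacement (rrdev), to both (normal writes while a replacement
 * is being rebuilt), or to neither; in the last case a failed write
 * leg is recorded via STRIPE_DEGRADED and the stripe is queued for
 * re-handling with STRIPE_HANDLE.
 */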

static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;
	struct async_submit_ctl submit;
	enum async_tx_flags flags = 0;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;

	if (frombio)
		flags |= ASYNC_TX_FENCE;
	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

	bio_for_each_segment(bvl, bio, i) {
		int len = bvl->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bvl->bv_offset;
			bio_page = bvl->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
						  b_offset, clen, &submit);
			else
				tx = async_memcpy(bio_page, page, b_offset,
						  page_offset, clen, &submit);
		}
		/* chain the operations */
		submit.depend_tx = tx;

		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}

	return tx;
}
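
/*
 * Offset example (illustrative): if the bio begins 2 sectors before
 * this stripe/device's sector, page_offset starts at -1024, so the
 * first 1024 bytes of bio payload are consumed as b_offset before
 * any bytes are copied to or from the stripe page.
 */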

static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_active_stripes(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&sh->stripe_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&sh->stripe_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
						     dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
	async_trigger_callback(&submit);
}

static void mark_target_uptodate(struct stripe_head *sh, int target)
{
	struct r5dev *tgt;

	if (target < 0)
		return;

	tgt = &sh->dev[target];
	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
}

static void ops_complete_compute(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* mark the computed target(s) as uptodate */
	mark_target_uptodate(sh, sh->ops.target);
	mark_target_uptodate(sh, sh->ops.target2);

	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
				 struct raid5_percpu *percpu)
{
	return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}
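
/*
 * The per-cpu scribble buffer is laid out as (disks + 2) page
 * pointers (the sources plus the P and Q destinations) followed by
 * the addr_conv_t region that async_tx needs; the pointer arithmetic
 * above simply skips past the page-pointer list.
 */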

static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}

/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = NULL;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		srcs[slot] = sh->dev[i].page;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	return syndrome_disks;
}
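
/*
 * For the 6-device md-layout example above this fills srcs[0..3]
 * with the data pages in syndrome order, srcs[4] with P and srcs[5]
 * with Q, and returns 4; callers then pass count+2 == 6 blocks to
 * async_gen_syndrome().
 */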
  836. static struct dma_async_tx_descriptor *
  837. ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
  838. {
  839. int disks = sh->disks;
  840. struct page **blocks = percpu->scribble;
  841. int target;
  842. int qd_idx = sh->qd_idx;
  843. struct dma_async_tx_descriptor *tx;
  844. struct async_submit_ctl submit;
  845. struct r5dev *tgt;
  846. struct page *dest;
  847. int i;
  848. int count;
  849. if (sh->ops.target < 0)
  850. target = sh->ops.target2;
  851. else if (sh->ops.target2 < 0)
  852. target = sh->ops.target;
  853. else
  854. /* we should only have one valid target */
  855. BUG();
  856. BUG_ON(target < 0);
  857. pr_debug("%s: stripe %llu block: %d\n",
  858. __func__, (unsigned long long)sh->sector, target);
  859. tgt = &sh->dev[target];
  860. BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
  861. dest = tgt->page;
  862. atomic_inc(&sh->count);
  863. if (target == qd_idx) {
  864. count = set_syndrome_sources(blocks, sh);
  865. blocks[count] = NULL; /* regenerating p is not necessary */
  866. BUG_ON(blocks[count+1] != dest); /* q should already be set */
  867. init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
  868. ops_complete_compute, sh,
  869. to_addr_conv(sh, percpu));
  870. tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
  871. } else {
  872. /* Compute any data- or p-drive using XOR */
  873. count = 0;
  874. for (i = disks; i-- ; ) {
  875. if (i == target || i == qd_idx)
  876. continue;
  877. blocks[count++] = sh->dev[i].page;
  878. }
  879. init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
  880. NULL, ops_complete_compute, sh,
  881. to_addr_conv(sh, percpu));
  882. tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
  883. }
  884. return tx;
  885. }

static struct dma_async_tx_descriptor *
ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
{
        int i, count, disks = sh->disks;
        int syndrome_disks = sh->ddf_layout ? disks : disks-2;
        int d0_idx = raid6_d0(sh);
        int faila = -1, failb = -1;
        int target = sh->ops.target;
        int target2 = sh->ops.target2;
        struct r5dev *tgt = &sh->dev[target];
        struct r5dev *tgt2 = &sh->dev[target2];
        struct dma_async_tx_descriptor *tx;
        struct page **blocks = percpu->scribble;
        struct async_submit_ctl submit;

        pr_debug("%s: stripe %llu block1: %d block2: %d\n",
                 __func__, (unsigned long long)sh->sector, target, target2);
        BUG_ON(target < 0 || target2 < 0);
        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
        BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));

        /* we need to open-code set_syndrome_sources to handle the
         * slot number conversion for 'faila' and 'failb'
         */
        for (i = 0; i < disks ; i++)
                blocks[i] = NULL;
        count = 0;
        i = d0_idx;
        do {
                int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

                blocks[slot] = sh->dev[i].page;

                if (i == target)
                        faila = slot;
                if (i == target2)
                        failb = slot;
                i = raid6_next_disk(i, disks);
        } while (i != d0_idx);

        BUG_ON(faila == failb);
        if (failb < faila)
                swap(faila, failb);
        pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
                 __func__, (unsigned long long)sh->sector, faila, failb);

        atomic_inc(&sh->count);

        if (failb == syndrome_disks+1) {
                /* Q disk is one of the missing disks */
                if (faila == syndrome_disks) {
                        /* Missing P+Q, just recompute */
                        init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
                                          ops_complete_compute, sh,
                                          to_addr_conv(sh, percpu));
                        return async_gen_syndrome(blocks, 0, syndrome_disks+2,
                                                  STRIPE_SIZE, &submit);
                } else {
                        struct page *dest;
                        int data_target;
                        int qd_idx = sh->qd_idx;

                        /* Missing D+Q: recompute D from P, then recompute Q */
                        if (target == qd_idx)
                                data_target = target2;
                        else
                                data_target = target;

                        count = 0;
                        for (i = disks; i-- ; ) {
                                if (i == data_target || i == qd_idx)
                                        continue;
                                blocks[count++] = sh->dev[i].page;
                        }
                        dest = sh->dev[data_target].page;
                        init_async_submit(&submit,
                                          ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
                                          NULL, NULL, NULL,
                                          to_addr_conv(sh, percpu));
                        tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
                                       &submit);

                        count = set_syndrome_sources(blocks, sh);
                        init_async_submit(&submit, ASYNC_TX_FENCE, tx,
                                          ops_complete_compute, sh,
                                          to_addr_conv(sh, percpu));
                        return async_gen_syndrome(blocks, 0, count+2,
                                                  STRIPE_SIZE, &submit);
                }
        } else {
                init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
                                  ops_complete_compute, sh,
                                  to_addr_conv(sh, percpu));
                if (failb == syndrome_disks) {
                        /* We're missing D+P. */
                        return async_raid6_datap_recov(syndrome_disks+2,
                                                       STRIPE_SIZE, faila,
                                                       blocks, &submit);
                } else {
                        /* We're missing D+D. */
                        return async_raid6_2data_recov(syndrome_disks+2,
                                                       STRIPE_SIZE, faila, failb,
                                                       blocks, &submit);
                }
        }
}
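
/*
 * Summary of the double-degraded cases handled above:
 *   P+Q missing -> regenerate both with async_gen_syndrome()
 *   D+Q missing -> XOR-rebuild D from the surviving data and P,
 *                  then regenerate Q with async_gen_syndrome()
 *   D+P missing -> async_raid6_datap_recov()
 *   D+D missing -> async_raid6_2data_recov()
 */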

static void ops_complete_prexor(void *stripe_head_ref)
{
        struct stripe_head *sh = stripe_head_ref;

        pr_debug("%s: stripe %llu\n", __func__,
                 (unsigned long long)sh->sector);
}

static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
               struct dma_async_tx_descriptor *tx)
{
        int disks = sh->disks;
        struct page **xor_srcs = percpu->scribble;
        int count = 0, pd_idx = sh->pd_idx, i;
        struct async_submit_ctl submit;

        /* existing parity data subtracted */
        struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

        pr_debug("%s: stripe %llu\n", __func__,
                 (unsigned long long)sh->sector);

        for (i = disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
                /* Only process blocks that are known to be uptodate */
                if (test_bit(R5_Wantdrain, &dev->flags))
                        xor_srcs[count++] = dev->page;
        }

        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
                          ops_complete_prexor, sh, to_addr_conv(sh, percpu));
        tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

        return tx;
}
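
/*
 * The prexor step implements the first half of the read-modify-write
 * identity P_new = P_old ^ D_old ^ D_new: with the parity page as both
 * destination and first source (ASYNC_TX_XOR_DROP_DST keeps it from being
 * counted twice in the synchronous case), this xor leaves P_old ^ D_old in
 * the parity page, and the later reconstruct pass folds in the new data.
 */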

static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
        int disks = sh->disks;
        int i;

        pr_debug("%s: stripe %llu\n", __func__,
                 (unsigned long long)sh->sector);

        for (i = disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
                struct bio *chosen;

                if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
                        struct bio *wbi;

                        spin_lock_irq(&sh->stripe_lock);
                        chosen = dev->towrite;
                        dev->towrite = NULL;
                        BUG_ON(dev->written);
                        wbi = dev->written = chosen;
                        spin_unlock_irq(&sh->stripe_lock);

                        while (wbi && wbi->bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                if (wbi->bi_rw & REQ_FUA)
                                        set_bit(R5_WantFUA, &dev->flags);
                                if (wbi->bi_rw & REQ_SYNC)
                                        set_bit(R5_SyncIO, &dev->flags);
                                if (wbi->bi_rw & REQ_DISCARD)
                                        set_bit(R5_Discard, &dev->flags);
                                else
                                        tx = async_copy_data(1, wbi, dev->page,
                                                             dev->sector, tx);
                                wbi = r5_next_bio(wbi, dev->sector);
                        }
                }
        }

        return tx;
}

static void ops_complete_reconstruct(void *stripe_head_ref)
{
        struct stripe_head *sh = stripe_head_ref;
        int disks = sh->disks;
        int pd_idx = sh->pd_idx;
        int qd_idx = sh->qd_idx;
        int i;
        bool fua = false, sync = false, discard = false;

        pr_debug("%s: stripe %llu\n", __func__,
                 (unsigned long long)sh->sector);

        for (i = disks; i--; ) {
                fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
                sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
                discard |= test_bit(R5_Discard, &sh->dev[i].flags);
        }

        for (i = disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];

                if (dev->written || i == pd_idx || i == qd_idx) {
                        if (!discard)
                                set_bit(R5_UPTODATE, &dev->flags);
                        if (fua)
                                set_bit(R5_WantFUA, &dev->flags);
                        if (sync)
                                set_bit(R5_SyncIO, &dev->flags);
                }
        }

        if (sh->reconstruct_state == reconstruct_state_drain_run)
                sh->reconstruct_state = reconstruct_state_drain_result;
        else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
                sh->reconstruct_state = reconstruct_state_prexor_drain_result;
        else {
                BUG_ON(sh->reconstruct_state != reconstruct_state_run);
                sh->reconstruct_state = reconstruct_state_result;
        }

        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
}

static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
                     struct dma_async_tx_descriptor *tx)
{
        int disks = sh->disks;
        struct page **xor_srcs = percpu->scribble;
        struct async_submit_ctl submit;
        int count = 0, pd_idx = sh->pd_idx, i;
        struct page *xor_dest;
        int prexor = 0;
        unsigned long flags;

        pr_debug("%s: stripe %llu\n", __func__,
                 (unsigned long long)sh->sector);

        for (i = 0; i < sh->disks; i++) {
                if (pd_idx == i)
                        continue;
                if (!test_bit(R5_Discard, &sh->dev[i].flags))
                        break;
        }
        if (i >= sh->disks) {
                atomic_inc(&sh->count);
                set_bit(R5_Discard, &sh->dev[pd_idx].flags);
                ops_complete_reconstruct(sh);
                return;
        }
        /* check if prexor is active which means only process blocks
         * that are part of a read-modify-write (written)
         */
        if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
                prexor = 1;
                xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];

                        if (dev->written)
                                xor_srcs[count++] = dev->page;
                }
        } else {
                xor_dest = sh->dev[pd_idx].page;
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];

                        if (i != pd_idx)
                                xor_srcs[count++] = dev->page;
                }
        }

        /* 1/ if we prexor'd then the dest is reused as a source
         * 2/ if we did not prexor then we are redoing the parity
         * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
         * for the synchronous xor case
         */
        flags = ASYNC_TX_ACK |
                (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

        atomic_inc(&sh->count);

        init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
                          to_addr_conv(sh, percpu));
        if (unlikely(count == 1))
                tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
        else
                tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
}
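
/*
 * Source selection above: after a prexor, only the freshly written blocks
 * (dev->written) are xor'ed back in and ASYNC_TX_XOR_DROP_DST reuses the
 * parity page as a source; without prexor, parity is recomputed from
 * scratch over every data block with ASYNC_TX_XOR_ZERO_DST.
 */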

static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
                     struct dma_async_tx_descriptor *tx)
{
        struct async_submit_ctl submit;
        struct page **blocks = percpu->scribble;
        int count, i;

        pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

        for (i = 0; i < sh->disks; i++) {
                if (sh->pd_idx == i || sh->qd_idx == i)
                        continue;
                if (!test_bit(R5_Discard, &sh->dev[i].flags))
                        break;
        }
        if (i >= sh->disks) {
                atomic_inc(&sh->count);
                set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
                set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
                ops_complete_reconstruct(sh);
                return;
        }

        count = set_syndrome_sources(blocks, sh);

        atomic_inc(&sh->count);

        init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
                          sh, to_addr_conv(sh, percpu));
        async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
}

static void ops_complete_check(void *stripe_head_ref)
{
        struct stripe_head *sh = stripe_head_ref;

        pr_debug("%s: stripe %llu\n", __func__,
                 (unsigned long long)sh->sector);

        sh->check_state = check_state_check_result;
        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
}

static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
{
        int disks = sh->disks;
        int pd_idx = sh->pd_idx;
        int qd_idx = sh->qd_idx;
        struct page *xor_dest;
        struct page **xor_srcs = percpu->scribble;
        struct dma_async_tx_descriptor *tx;
        struct async_submit_ctl submit;
        int count;
        int i;

        pr_debug("%s: stripe %llu\n", __func__,
                 (unsigned long long)sh->sector);

        count = 0;
        xor_dest = sh->dev[pd_idx].page;
        xor_srcs[count++] = xor_dest;
        for (i = disks; i--; ) {
                if (i == pd_idx || i == qd_idx)
                        continue;
                xor_srcs[count++] = sh->dev[i].page;
        }

        init_async_submit(&submit, 0, NULL, NULL, NULL,
                          to_addr_conv(sh, percpu));
        tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
                           &sh->ops.zero_sum_result, &submit);

        atomic_inc(&sh->count);
        init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
        tx = async_trigger_callback(&submit);
}

static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
        struct page **srcs = percpu->scribble;
        struct async_submit_ctl submit;
        int count;

        pr_debug("%s: stripe %llu checkp: %d\n", __func__,
                 (unsigned long long)sh->sector, checkp);

        count = set_syndrome_sources(srcs, sh);
        if (!checkp)
                srcs[count] = NULL;

        atomic_inc(&sh->count);
        init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
                          sh, to_addr_conv(sh, percpu));
        async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
                           &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}

static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
        int overlap_clear = 0, i, disks = sh->disks;
        struct dma_async_tx_descriptor *tx = NULL;
        struct r5conf *conf = sh->raid_conf;
        int level = conf->level;
        struct raid5_percpu *percpu;
        unsigned long cpu;

        cpu = get_cpu();
        percpu = per_cpu_ptr(conf->percpu, cpu);
        if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
                ops_run_biofill(sh);
                overlap_clear++;
        }

        if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
                if (level < 6)
                        tx = ops_run_compute5(sh, percpu);
                else {
                        if (sh->ops.target2 < 0 || sh->ops.target < 0)
                                tx = ops_run_compute6_1(sh, percpu);
                        else
                                tx = ops_run_compute6_2(sh, percpu);
                }
                /* terminate the chain if reconstruct is not set to be run */
                if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
                        async_tx_ack(tx);
        }

        if (test_bit(STRIPE_OP_PREXOR, &ops_request))
                tx = ops_run_prexor(sh, percpu, tx);

        if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
                tx = ops_run_biodrain(sh, tx);
                overlap_clear++;
        }

        if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
                if (level < 6)
                        ops_run_reconstruct5(sh, percpu, tx);
                else
                        ops_run_reconstruct6(sh, percpu, tx);
        }

        if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
                if (sh->check_state == check_state_run)
                        ops_run_check_p(sh, percpu);
                else if (sh->check_state == check_state_run_q)
                        ops_run_check_pq(sh, percpu, 0);
                else if (sh->check_state == check_state_run_pq)
                        ops_run_check_pq(sh, percpu, 1);
                else
                        BUG();
        }

        if (overlap_clear)
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];

                        if (test_and_clear_bit(R5_Overlap, &dev->flags))
                                wake_up(&sh->raid_conf->wait_for_overlap);
                }
        put_cpu();
}
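
/*
 * Note that 'tx' threads through every step above: each async operation
 * chains on the descriptor returned by the previous one, so compute ->
 * prexor -> biodrain -> reconstruct stay ordered even when they are
 * offloaded to a DMA engine rather than run synchronously.
 */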

static int grow_one_stripe(struct r5conf *conf)
{
        struct stripe_head *sh;

        sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
        if (!sh)
                return 0;

        sh->raid_conf = conf;

        spin_lock_init(&sh->stripe_lock);

        if (grow_buffers(sh)) {
                shrink_buffers(sh);
                kmem_cache_free(conf->slab_cache, sh);
                return 0;
        }
        /* we just created an active stripe so... */
        atomic_set(&sh->count, 1);
        atomic_inc(&conf->active_stripes);
        INIT_LIST_HEAD(&sh->lru);
        release_stripe(sh);
        return 1;
}

static int grow_stripes(struct r5conf *conf, int num)
{
        struct kmem_cache *sc;
        int devs = max(conf->raid_disks, conf->previous_raid_disks);

        if (conf->mddev->gendisk)
                sprintf(conf->cache_name[0],
                        "raid%d-%s", conf->level, mdname(conf->mddev));
        else
                sprintf(conf->cache_name[0],
                        "raid%d-%p", conf->level, conf->mddev);
        sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);

        conf->active_name = 0;
        sc = kmem_cache_create(conf->cache_name[conf->active_name],
                               sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
                               0, 0, NULL);
        if (!sc)
                return 1;
        conf->slab_cache = sc;
        conf->pool_size = devs;
        while (num--)
                if (!grow_one_stripe(conf))
                        return 1;
        return 0;
}

/**
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
        size_t len;

        len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

        return len;
}
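
/*
 * Example (illustrative, assuming a 64-bit build where addr_conv_t is
 * pointer sized): a 10-device array needs (10+2) * 8 bytes of page pointers
 * plus (10+2) * 8 bytes of address-conversion space, i.e. 192 bytes per CPU.
 */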

static int resize_stripes(struct r5conf *conf, int newsize)
{
        /* Make all the stripes able to hold 'newsize' devices.
         * New slots in each stripe get 'page' set to a new page.
         *
         * This happens in stages:
         * 1/ create a new kmem_cache and allocate the required number of
         *    stripe_heads.
         * 2/ gather all the old stripe_heads and transfer the pages across
         *    to the new stripe_heads.  This will have the side effect of
         *    freezing the array as once all stripe_heads have been collected,
         *    no IO will be possible.  Old stripe heads are freed once their
         *    pages have been transferred over, and the old kmem_cache is
         *    freed when all stripes are done.
         * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
         *    we simply return a failure status - no need to clean anything up.
         * 4/ allocate new pages for the new slots in the new stripe_heads.
         *    If this fails, we don't bother trying to shrink the
         *    stripe_heads down again, we just leave them as they are.
         *    As each stripe_head is processed the new one is released into
         *    active service.
         *
         * Once step 2 is started, we cannot afford to wait for a write,
         * so we use GFP_NOIO allocations.
         */
        struct stripe_head *osh, *nsh;
        LIST_HEAD(newstripes);
        struct disk_info *ndisks;
        unsigned long cpu;
        int err;
        struct kmem_cache *sc;
        int i;

        if (newsize <= conf->pool_size)
                return 0; /* never bother to shrink */

        err = md_allow_write(conf->mddev);
        if (err)
                return err;

        /* Step 1 */
        sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
                               sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
                               0, 0, NULL);
        if (!sc)
                return -ENOMEM;

        for (i = conf->max_nr_stripes; i; i--) {
                nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
                if (!nsh)
                        break;

                nsh->raid_conf = conf;
                spin_lock_init(&nsh->stripe_lock);

                list_add(&nsh->lru, &newstripes);
        }
        if (i) {
                /* didn't get enough, give up */
                while (!list_empty(&newstripes)) {
                        nsh = list_entry(newstripes.next, struct stripe_head, lru);
                        list_del(&nsh->lru);
                        kmem_cache_free(sc, nsh);
                }
                kmem_cache_destroy(sc);
                return -ENOMEM;
        }
        /* Step 2 - Must use GFP_NOIO now.
         * OK, we have enough stripes, start collecting inactive
         * stripes and copying them over
         */
        list_for_each_entry(nsh, &newstripes, lru) {
                spin_lock_irq(&conf->device_lock);
                wait_event_lock_irq(conf->wait_for_stripe,
                                    !list_empty(&conf->inactive_list),
                                    conf->device_lock);
                osh = get_free_stripe(conf);
                spin_unlock_irq(&conf->device_lock);
                atomic_set(&nsh->count, 1);
                for(i=0; i<conf->pool_size; i++)
                        nsh->dev[i].page = osh->dev[i].page;
                for( ; i<newsize; i++)
                        nsh->dev[i].page = NULL;
                kmem_cache_free(conf->slab_cache, osh);
        }
        kmem_cache_destroy(conf->slab_cache);

        /* Step 3.
         * At this point, we are holding all the stripes so the array
         * is completely stalled, so now is a good time to resize
         * conf->disks and the scribble region
         */
        ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
        if (ndisks) {
                for (i=0; i<conf->raid_disks; i++)
                        ndisks[i] = conf->disks[i];
                kfree(conf->disks);
                conf->disks = ndisks;
        } else
                err = -ENOMEM;

        get_online_cpus();
        conf->scribble_len = scribble_len(newsize);
        for_each_present_cpu(cpu) {
                struct raid5_percpu *percpu;
                void *scribble;

                percpu = per_cpu_ptr(conf->percpu, cpu);
                scribble = kmalloc(conf->scribble_len, GFP_NOIO);

                if (scribble) {
                        kfree(percpu->scribble);
                        percpu->scribble = scribble;
                } else {
                        err = -ENOMEM;
                        break;
                }
        }
        put_online_cpus();

        /* Step 4, return new stripes to service */
        while(!list_empty(&newstripes)) {
                nsh = list_entry(newstripes.next, struct stripe_head, lru);
                list_del_init(&nsh->lru);

                for (i=conf->raid_disks; i < newsize; i++)
                        if (nsh->dev[i].page == NULL) {
                                struct page *p = alloc_page(GFP_NOIO);
                                nsh->dev[i].page = p;
                                if (!p)
                                        err = -ENOMEM;
                        }
                release_stripe(nsh);
        }
        /* critical section passed, GFP_NOIO no longer needed */

        conf->slab_cache = sc;
        conf->active_name = 1-conf->active_name;
        conf->pool_size = newsize;
        return err;
}

static int drop_one_stripe(struct r5conf *conf)
{
        struct stripe_head *sh;

        spin_lock_irq(&conf->device_lock);
        sh = get_free_stripe(conf);
        spin_unlock_irq(&conf->device_lock);
        if (!sh)
                return 0;
        BUG_ON(atomic_read(&sh->count));
        shrink_buffers(sh);
        kmem_cache_free(conf->slab_cache, sh);
        atomic_dec(&conf->active_stripes);
        return 1;
}

static void shrink_stripes(struct r5conf *conf)
{
        while (drop_one_stripe(conf))
                ;

        if (conf->slab_cache)
                kmem_cache_destroy(conf->slab_cache);
        conf->slab_cache = NULL;
}

static void raid5_end_read_request(struct bio * bi, int error)
{
        struct stripe_head *sh = bi->bi_private;
        struct r5conf *conf = sh->raid_conf;
        int disks = sh->disks, i;
        int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
        char b[BDEVNAME_SIZE];
        struct md_rdev *rdev = NULL;
        sector_t s;

        for (i=0 ; i<disks; i++)
                if (bi == &sh->dev[i].req)
                        break;
        pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
                 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
                 uptodate);
        if (i == disks) {
                BUG();
                return;
        }
        if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
                /* If replacement finished while this request was outstanding,
                 * 'replacement' might be NULL already.
                 * In that case it moved down to 'rdev'.
                 * rdev is not removed until all requests are finished.
                 */
                rdev = conf->disks[i].replacement;
        if (!rdev)
                rdev = conf->disks[i].rdev;

        if (use_new_offset(conf, sh))
                s = sh->sector + rdev->new_data_offset;
        else
                s = sh->sector + rdev->data_offset;
        if (uptodate) {
                set_bit(R5_UPTODATE, &sh->dev[i].flags);
                if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
                        /* Note that this cannot happen on a
                         * replacement device.  We just fail those on
                         * any error
                         */
                        printk_ratelimited(
                                KERN_INFO
                                "md/raid:%s: read error corrected"
                                " (%lu sectors at %llu on %s)\n",
                                mdname(conf->mddev), STRIPE_SECTORS,
                                (unsigned long long)s,
                                bdevname(rdev->bdev, b));
                        atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
                        clear_bit(R5_ReadError, &sh->dev[i].flags);
                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
                } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
                        clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);

                if (atomic_read(&rdev->read_errors))
                        atomic_set(&rdev->read_errors, 0);
        } else {
                const char *bdn = bdevname(rdev->bdev, b);
                int retry = 0;
                int set_bad = 0;

                clear_bit(R5_UPTODATE, &sh->dev[i].flags);
                atomic_inc(&rdev->read_errors);
                if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
                        printk_ratelimited(
                                KERN_WARNING
                                "md/raid:%s: read error on replacement device "
                                "(sector %llu on %s).\n",
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
                else if (conf->mddev->degraded >= conf->max_degraded) {
                        set_bad = 1;
                        printk_ratelimited(
                                KERN_WARNING
                                "md/raid:%s: read error not correctable "
                                "(sector %llu on %s).\n",
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
                } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
                        /* Oh, no!!! */
                        set_bad = 1;
                        printk_ratelimited(
                                KERN_WARNING
                                "md/raid:%s: read error NOT corrected!! "
                                "(sector %llu on %s).\n",
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
                } else if (atomic_read(&rdev->read_errors)
                           > conf->max_nr_stripes)
                        printk(KERN_WARNING
                               "md/raid:%s: Too many read errors, failing device %s.\n",
                               mdname(conf->mddev), bdn);
                else
                        retry = 1;
                if (retry)
                        if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
                                set_bit(R5_ReadError, &sh->dev[i].flags);
                                clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
                        } else
                                set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
                else {
                        clear_bit(R5_ReadError, &sh->dev[i].flags);
                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
                        if (!(set_bad
                              && test_bit(In_sync, &rdev->flags)
                              && rdev_set_badblocks(
                                      rdev, sh->sector, STRIPE_SECTORS, 0)))
                                md_error(conf->mddev, rdev);
                }
        }
        rdev_dec_pending(rdev, conf->mddev);
        clear_bit(R5_LOCKED, &sh->dev[i].flags);
        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
}

static void raid5_end_write_request(struct bio *bi, int error)
{
        struct stripe_head *sh = bi->bi_private;
        struct r5conf *conf = sh->raid_conf;
        int disks = sh->disks, i;
        struct md_rdev *uninitialized_var(rdev);
        int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
        sector_t first_bad;
        int bad_sectors;
        int replacement = 0;

        for (i = 0 ; i < disks; i++) {
                if (bi == &sh->dev[i].req) {
                        rdev = conf->disks[i].rdev;
                        break;
                }
                if (bi == &sh->dev[i].rreq) {
                        rdev = conf->disks[i].replacement;
                        if (rdev)
                                replacement = 1;
                        else
                                /* rdev was removed and 'replacement'
                                 * replaced it.  rdev is not removed
                                 * until all requests are finished.
                                 */
                                rdev = conf->disks[i].rdev;
                        break;
                }
        }
        pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
                 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
                 uptodate);
        if (i == disks) {
                BUG();
                return;
        }

        if (replacement) {
                if (!uptodate)
                        md_error(conf->mddev, rdev);
                else if (is_badblock(rdev, sh->sector,
                                     STRIPE_SECTORS,
                                     &first_bad, &bad_sectors))
                        set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
        } else {
                if (!uptodate) {
                        set_bit(WriteErrorSeen, &rdev->flags);
                        set_bit(R5_WriteError, &sh->dev[i].flags);
                        if (!test_and_set_bit(WantReplacement, &rdev->flags))
                                set_bit(MD_RECOVERY_NEEDED,
                                        &rdev->mddev->recovery);
                } else if (is_badblock(rdev, sh->sector,
                                       STRIPE_SECTORS,
                                       &first_bad, &bad_sectors))
                        set_bit(R5_MadeGood, &sh->dev[i].flags);
        }
        rdev_dec_pending(rdev, conf->mddev);

        if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
                clear_bit(R5_LOCKED, &sh->dev[i].flags);
        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
}

static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);

static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
        struct r5dev *dev = &sh->dev[i];

        bio_init(&dev->req);
        dev->req.bi_io_vec = &dev->vec;
        dev->req.bi_vcnt++;
        dev->req.bi_max_vecs++;
        dev->req.bi_private = sh;
        dev->vec.bv_page = dev->page;

        bio_init(&dev->rreq);
        dev->rreq.bi_io_vec = &dev->rvec;
        dev->rreq.bi_vcnt++;
        dev->rreq.bi_max_vecs++;
        dev->rreq.bi_private = sh;
        dev->rvec.bv_page = dev->page;

        dev->flags = 0;
        dev->sector = compute_blocknr(sh, i, previous);
}

static void error(struct mddev *mddev, struct md_rdev *rdev)
{
        char b[BDEVNAME_SIZE];
        struct r5conf *conf = mddev->private;
        unsigned long flags;
        pr_debug("raid456: error called\n");

        spin_lock_irqsave(&conf->device_lock, flags);
        clear_bit(In_sync, &rdev->flags);
        mddev->degraded = calc_degraded(conf);
        spin_unlock_irqrestore(&conf->device_lock, flags);
        set_bit(MD_RECOVERY_INTR, &mddev->recovery);

        set_bit(Blocked, &rdev->flags);
        set_bit(Faulty, &rdev->flags);
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
        printk(KERN_ALERT
               "md/raid:%s: Disk failure on %s, disabling device.\n"
               "md/raid:%s: Operation continuing on %d devices.\n",
               mdname(mddev),
               bdevname(rdev->bdev, b),
               mdname(mddev),
               conf->raid_disks - mddev->degraded);
}

/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
                                     int previous, int *dd_idx,
                                     struct stripe_head *sh)
{
        sector_t stripe, stripe2;
        sector_t chunk_number;
        unsigned int chunk_offset;
        int pd_idx, qd_idx;
        int ddf_layout = 0;
        sector_t new_sector;
        int algorithm = previous ? conf->prev_algo
                                 : conf->algorithm;
        int sectors_per_chunk = previous ? conf->prev_chunk_sectors
                                         : conf->chunk_sectors;
        int raid_disks = previous ? conf->previous_raid_disks
                                  : conf->raid_disks;
        int data_disks = raid_disks - conf->max_degraded;

        /* First compute the information on this sector */

        /*
         * Compute the chunk number and the sector offset inside the chunk
         */
        chunk_offset = sector_div(r_sector, sectors_per_chunk);
        chunk_number = r_sector;

        /*
         * Compute the stripe number
         */
        stripe = chunk_number;
        *dd_idx = sector_div(stripe, data_disks);
        stripe2 = stripe;
        /*
         * Select the parity disk based on the user selected algorithm.
         */
        pd_idx = qd_idx = -1;
        switch(conf->level) {
        case 4:
                pd_idx = data_disks;
                break;
        case 5:
                switch (algorithm) {
                case ALGORITHM_LEFT_ASYMMETRIC:
                        pd_idx = data_disks - sector_div(stripe2, raid_disks);
                        if (*dd_idx >= pd_idx)
                                (*dd_idx)++;
                        break;
                case ALGORITHM_RIGHT_ASYMMETRIC:
                        pd_idx = sector_div(stripe2, raid_disks);
                        if (*dd_idx >= pd_idx)
                                (*dd_idx)++;
                        break;
                case ALGORITHM_LEFT_SYMMETRIC:
                        pd_idx = data_disks - sector_div(stripe2, raid_disks);
                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
                        break;
                case ALGORITHM_RIGHT_SYMMETRIC:
                        pd_idx = sector_div(stripe2, raid_disks);
                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
                        break;
                case ALGORITHM_PARITY_0:
                        pd_idx = 0;
                        (*dd_idx)++;
                        break;
                case ALGORITHM_PARITY_N:
                        pd_idx = data_disks;
                        break;
                default:
                        BUG();
                }
                break;
        case 6:

                switch (algorithm) {
                case ALGORITHM_LEFT_ASYMMETRIC:
                        pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
                        qd_idx = pd_idx + 1;
                        if (pd_idx == raid_disks-1) {
                                (*dd_idx)++;    /* Q D D D P */
                                qd_idx = 0;
                        } else if (*dd_idx >= pd_idx)
                                (*dd_idx) += 2; /* D D P Q D */
                        break;
                case ALGORITHM_RIGHT_ASYMMETRIC:
                        pd_idx = sector_div(stripe2, raid_disks);
                        qd_idx = pd_idx + 1;
                        if (pd_idx == raid_disks-1) {
                                (*dd_idx)++;    /* Q D D D P */
                                qd_idx = 0;
                        } else if (*dd_idx >= pd_idx)
                                (*dd_idx) += 2; /* D D P Q D */
                        break;
                case ALGORITHM_LEFT_SYMMETRIC:
                        pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
                        qd_idx = (pd_idx + 1) % raid_disks;
                        *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
                        break;
                case ALGORITHM_RIGHT_SYMMETRIC:
                        pd_idx = sector_div(stripe2, raid_disks);
                        qd_idx = (pd_idx + 1) % raid_disks;
                        *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
                        break;
                case ALGORITHM_PARITY_0:
                        pd_idx = 0;
                        qd_idx = 1;
                        (*dd_idx) += 2;
                        break;
                case ALGORITHM_PARITY_N:
                        pd_idx = data_disks;
                        qd_idx = data_disks + 1;
                        break;

                case ALGORITHM_ROTATING_ZERO_RESTART:
                        /* Exactly the same as RIGHT_ASYMMETRIC, but the
                         * order of blocks for computing Q is different.
                         */
                        pd_idx = sector_div(stripe2, raid_disks);
                        qd_idx = pd_idx + 1;
                        if (pd_idx == raid_disks-1) {
                                (*dd_idx)++;    /* Q D D D P */
                                qd_idx = 0;
                        } else if (*dd_idx >= pd_idx)
                                (*dd_idx) += 2; /* D D P Q D */
                        ddf_layout = 1;
                        break;

                case ALGORITHM_ROTATING_N_RESTART:
                        /* Same as left_asymmetric, but the first stripe is
                         * D D D P Q  rather than
                         * Q D D D P
                         */
                        stripe2 += 1;
                        pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
                        qd_idx = pd_idx + 1;
                        if (pd_idx == raid_disks-1) {
                                (*dd_idx)++;    /* Q D D D P */
                                qd_idx = 0;
                        } else if (*dd_idx >= pd_idx)
                                (*dd_idx) += 2; /* D D P Q D */
                        ddf_layout = 1;
                        break;

                case ALGORITHM_ROTATING_N_CONTINUE:
                        /* Same as left_symmetric but Q is before P */
                        pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
                        qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
                        ddf_layout = 1;
                        break;

                case ALGORITHM_LEFT_ASYMMETRIC_6:
                        /* RAID5 left_asymmetric, with Q on last device */
                        pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
                        if (*dd_idx >= pd_idx)
                                (*dd_idx)++;
                        qd_idx = raid_disks - 1;
                        break;

                case ALGORITHM_RIGHT_ASYMMETRIC_6:
                        pd_idx = sector_div(stripe2, raid_disks-1);
                        if (*dd_idx >= pd_idx)
                                (*dd_idx)++;
                        qd_idx = raid_disks - 1;
                        break;

                case ALGORITHM_LEFT_SYMMETRIC_6:
                        pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
                        *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
                        qd_idx = raid_disks - 1;
                        break;

                case ALGORITHM_RIGHT_SYMMETRIC_6:
                        pd_idx = sector_div(stripe2, raid_disks-1);
                        *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
                        qd_idx = raid_disks - 1;
                        break;

                case ALGORITHM_PARITY_0_6:
                        pd_idx = 0;
                        (*dd_idx)++;
                        qd_idx = raid_disks - 1;
                        break;

                default:
                        BUG();
                }
                break;
        }

        if (sh) {
                sh->pd_idx = pd_idx;
                sh->qd_idx = qd_idx;
                sh->ddf_layout = ddf_layout;
        }
        /*
         * Finally, compute the new sector number
         */
        new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
        return new_sector;
}
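
/*
 * Worked example (illustrative): RAID5, 4 disks, LEFT_SYMMETRIC layout,
 * sectors_per_chunk = 8.  For r_sector = 100: chunk_offset = 4 and
 * chunk_number = 12, so stripe = 4 and the raw dd_idx = 0.  Then
 * pd_idx = 3 - (4 % 4) = 3 and dd_idx = (3 + 1 + 0) % 4 = 0, giving
 * device 0, sector 4*8 + 4 = 36, with parity on device 3.
 */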

static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
        struct r5conf *conf = sh->raid_conf;
        int raid_disks = sh->disks;
        int data_disks = raid_disks - conf->max_degraded;
        sector_t new_sector = sh->sector, check;
        int sectors_per_chunk = previous ? conf->prev_chunk_sectors
                                         : conf->chunk_sectors;
        int algorithm = previous ? conf->prev_algo
                                 : conf->algorithm;
        sector_t stripe;
        int chunk_offset;
        sector_t chunk_number;
        int dummy1, dd_idx = i;
        sector_t r_sector;
        struct stripe_head sh2;

        chunk_offset = sector_div(new_sector, sectors_per_chunk);
        stripe = new_sector;

        if (i == sh->pd_idx)
                return 0;
        switch(conf->level) {
        case 4: break;
        case 5:
                switch (algorithm) {
                case ALGORITHM_LEFT_ASYMMETRIC:
                case ALGORITHM_RIGHT_ASYMMETRIC:
                        if (i > sh->pd_idx)
                                i--;
                        break;
                case ALGORITHM_LEFT_SYMMETRIC:
                case ALGORITHM_RIGHT_SYMMETRIC:
                        if (i < sh->pd_idx)
                                i += raid_disks;
                        i -= (sh->pd_idx + 1);
                        break;
                case ALGORITHM_PARITY_0:
                        i -= 1;
                        break;
                case ALGORITHM_PARITY_N:
                        break;
                default:
                        BUG();
                }
                break;
        case 6:
                if (i == sh->qd_idx)
                        return 0; /* It is the Q disk */
                switch (algorithm) {
                case ALGORITHM_LEFT_ASYMMETRIC:
                case ALGORITHM_RIGHT_ASYMMETRIC:
                case ALGORITHM_ROTATING_ZERO_RESTART:
                case ALGORITHM_ROTATING_N_RESTART:
                        if (sh->pd_idx == raid_disks-1)
                                i--;    /* Q D D D P */
                        else if (i > sh->pd_idx)
                                i -= 2; /* D D P Q D */
                        break;
                case ALGORITHM_LEFT_SYMMETRIC:
                case ALGORITHM_RIGHT_SYMMETRIC:
                        if (sh->pd_idx == raid_disks-1)
                                i--; /* Q D D D P */
                        else {
                                /* D D P Q D */
                                if (i < sh->pd_idx)
                                        i += raid_disks;
                                i -= (sh->pd_idx + 2);
                        }
                        break;
                case ALGORITHM_PARITY_0:
                        i -= 2;
                        break;
                case ALGORITHM_PARITY_N:
                        break;
                case ALGORITHM_ROTATING_N_CONTINUE:
                        /* Like left_symmetric, but P is before Q */
                        if (sh->pd_idx == 0)
                                i--;    /* P D D D Q */
                        else {
                                /* D D Q P D */
                                if (i < sh->pd_idx)
                                        i += raid_disks;
                                i -= (sh->pd_idx + 1);
                        }
                        break;
                case ALGORITHM_LEFT_ASYMMETRIC_6:
                case ALGORITHM_RIGHT_ASYMMETRIC_6:
                        if (i > sh->pd_idx)
                                i--;
                        break;
                case ALGORITHM_LEFT_SYMMETRIC_6:
                case ALGORITHM_RIGHT_SYMMETRIC_6:
                        if (i < sh->pd_idx)
                                i += data_disks + 1;
                        i -= (sh->pd_idx + 1);
                        break;
                case ALGORITHM_PARITY_0_6:
                        i -= 1;
                        break;
                default:
                        BUG();
                }
                break;
        }

        chunk_number = stripe * data_disks + i;
        r_sector = chunk_number * sectors_per_chunk + chunk_offset;

        check = raid5_compute_sector(conf, r_sector,
                                     previous, &dummy1, &sh2);
        if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
                || sh2.qd_idx != sh->qd_idx) {
                printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
                       mdname(conf->mddev));
                return 0;
        }
        return r_sector;
}
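
/*
 * compute_blocknr() is the exact inverse of raid5_compute_sector(), and it
 * verifies itself by mapping the result forward again.  Continuing the
 * example above: device 0, sector 36, pd_idx 3 folds back to chunk_number
 * 4*3 + 0 = 12 and r_sector = 12*8 + 4 = 100.
 */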

static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
                        int rcw, int expand)
{
        int i, pd_idx = sh->pd_idx, disks = sh->disks;
        struct r5conf *conf = sh->raid_conf;
        int level = conf->level;

        if (rcw) {
                /* if we are not expanding this is a proper write request, and
                 * there will be bios with new data to be drained into the
                 * stripe cache
                 */
                if (!expand) {
                        sh->reconstruct_state = reconstruct_state_drain_run;
                        set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
                } else
                        sh->reconstruct_state = reconstruct_state_run;

                set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];

                        if (dev->towrite) {
                                set_bit(R5_LOCKED, &dev->flags);
                                set_bit(R5_Wantdrain, &dev->flags);
                                if (!expand)
                                        clear_bit(R5_UPTODATE, &dev->flags);
                                s->locked++;
                        }
                }
                if (s->locked + conf->max_degraded == disks)
                        if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
                                atomic_inc(&conf->pending_full_writes);
        } else {
                BUG_ON(level == 6);
                BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
                        test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));

                sh->reconstruct_state = reconstruct_state_prexor_drain_run;
                set_bit(STRIPE_OP_PREXOR, &s->ops_request);
                set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
                set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
                        if (i == pd_idx)
                                continue;

                        if (dev->towrite &&
                            (test_bit(R5_UPTODATE, &dev->flags) ||
                             test_bit(R5_Wantcompute, &dev->flags))) {
                                set_bit(R5_Wantdrain, &dev->flags);
                                set_bit(R5_LOCKED, &dev->flags);
                                clear_bit(R5_UPTODATE, &dev->flags);
                                s->locked++;
                        }
                }
        }

        /* keep the parity disk(s) locked while asynchronous operations
         * are in flight
         */
        set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
        clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
        s->locked++;

        if (level == 6) {
                int qd_idx = sh->qd_idx;
                struct r5dev *dev = &sh->dev[qd_idx];

                set_bit(R5_LOCKED, &dev->flags);
                clear_bit(R5_UPTODATE, &dev->flags);
                s->locked++;
        }

        pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
                 __func__, (unsigned long long)sh->sector,
                 s->locked, s->ops_request);
}
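
/*
 * The rcw branch (reconstruct-write) drains new data into the stripe and
 * recomputes parity from the full stripe contents; the rmw branch prexors
 * the old data and parity first, which is why it insists that every block
 * to be drained is already uptodate or about to be computed.  RAID6 only
 * ever takes the rcw branch (see the BUG_ON above).
 */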

/*
 * Each stripe/dev can have one or more bios attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
        struct bio **bip;
        struct r5conf *conf = sh->raid_conf;
        int firstwrite=0;

        pr_debug("adding bi b#%llu to stripe s#%llu\n",
                 (unsigned long long)bi->bi_sector,
                 (unsigned long long)sh->sector);

        /*
         * If several bios share a stripe, bi_phys_segments acts as a
         * reference count to avoid races.  The reference count should
         * already be increased before this function is called (for example,
         * in make_request()), so other bios sharing this stripe will not
         * free the stripe.  If a stripe is owned by a single bio, the
         * stripe lock will protect it.
         */
        spin_lock_irq(&sh->stripe_lock);
        if (forwrite) {
                bip = &sh->dev[dd_idx].towrite;
                if (*bip == NULL)
                        firstwrite = 1;
        } else
                bip = &sh->dev[dd_idx].toread;
        while (*bip && (*bip)->bi_sector < bi->bi_sector) {
                if (bio_end_sector(*bip) > bi->bi_sector)
                        goto overlap;
                bip = & (*bip)->bi_next;
        }
        if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
                goto overlap;

        BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
        if (*bip)
                bi->bi_next = *bip;
        *bip = bi;
        raid5_inc_bi_active_stripes(bi);

        if (forwrite) {
                /* check if page is covered */
                sector_t sector = sh->dev[dd_idx].sector;
                for (bi=sh->dev[dd_idx].towrite;
                     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
                             bi && bi->bi_sector <= sector;
                     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
                        if (bio_end_sector(bi) >= sector)
                                sector = bio_end_sector(bi);
                }
                if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
                        set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
        }

        pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
                 (unsigned long long)(*bip)->bi_sector,
                 (unsigned long long)sh->sector, dd_idx);
        spin_unlock_irq(&sh->stripe_lock);

        if (conf->mddev->bitmap && firstwrite) {
                bitmap_startwrite(conf->mddev->bitmap, sh->sector,
                                  STRIPE_SECTORS, 0);
                sh->bm_seq = conf->seq_flush+1;
                set_bit(STRIPE_BIT_DELAY, &sh->state);
        }
        return 1;

 overlap:
        set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
        spin_unlock_irq(&sh->stripe_lock);
        return 0;
}
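
/*
 * The R5_OVERWRITE scan above walks the sorted towrite chain to see whether
 * the queued bios cover the whole STRIPE_SECTORS range with no holes.  For
 * example (illustrative), with STRIPE_SECTORS = 8 and dev->sector = 64,
 * bios covering [64,68) and [68,72) advance 'sector' to 72 and set
 * R5_OVERWRITE, while [64,68) plus [69,72) leave a hole and do not.
 */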

static void end_reshape(struct r5conf *conf);

static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
                           struct stripe_head *sh)
{
        int sectors_per_chunk =
                previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
        int dd_idx;
        int chunk_offset = sector_div(stripe, sectors_per_chunk);
        int disks = previous ? conf->previous_raid_disks : conf->raid_disks;

        raid5_compute_sector(conf,
                             stripe * (disks - conf->max_degraded)
                             *sectors_per_chunk + chunk_offset,
                             previous,
                             &dd_idx, sh);
}

static void
handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                     struct stripe_head_state *s, int disks,
                     struct bio **return_bi)
{
        int i;
        for (i = disks; i--; ) {
                struct bio *bi;
                int bitmap_end = 0;

                if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
                        struct md_rdev *rdev;
                        rcu_read_lock();
                        rdev = rcu_dereference(conf->disks[i].rdev);
                        if (rdev && test_bit(In_sync, &rdev->flags))
                                atomic_inc(&rdev->nr_pending);
                        else
                                rdev = NULL;
                        rcu_read_unlock();
                        if (rdev) {
                                if (!rdev_set_badblocks(
                                            rdev,
                                            sh->sector,
                                            STRIPE_SECTORS, 0))
                                        md_error(conf->mddev, rdev);
                                rdev_dec_pending(rdev, conf->mddev);
                        }
                }
                spin_lock_irq(&sh->stripe_lock);
                /* fail all writes first */
                bi = sh->dev[i].towrite;
                sh->dev[i].towrite = NULL;
                spin_unlock_irq(&sh->stripe_lock);
                if (bi)
                        bitmap_end = 1;

                if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                        wake_up(&conf->wait_for_overlap);

                while (bi && bi->bi_sector <
                        sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
                        if (!raid5_dec_bi_active_stripes(bi)) {
                                md_write_end(conf->mddev);
                                bi->bi_next = *return_bi;
                                *return_bi = bi;
                        }
                        bi = nextbi;
                }
                if (bitmap_end)
                        bitmap_endwrite(conf->mddev->bitmap, sh->sector,
                                        STRIPE_SECTORS, 0, 0);
                bitmap_end = 0;
                /* and fail all 'written' */
                bi = sh->dev[i].written;
                sh->dev[i].written = NULL;
                if (bi) bitmap_end = 1;
                while (bi && bi->bi_sector <
                       sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
                        if (!raid5_dec_bi_active_stripes(bi)) {
                                md_write_end(conf->mddev);
                                bi->bi_next = *return_bi;
                                *return_bi = bi;
                        }
                        bi = bi2;
                }

                /* fail any reads if this device is non-operational and
                 * the data has not reached the cache yet.
                 */
                if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
                    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
                      test_bit(R5_ReadError, &sh->dev[i].flags))) {
                        spin_lock_irq(&sh->stripe_lock);
                        bi = sh->dev[i].toread;
                        sh->dev[i].toread = NULL;
                        spin_unlock_irq(&sh->stripe_lock);
                        if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                                wake_up(&conf->wait_for_overlap);
                        while (bi && bi->bi_sector <
                               sh->dev[i].sector + STRIPE_SECTORS) {
                                struct bio *nextbi =
                                        r5_next_bio(bi, sh->dev[i].sector);
                                clear_bit(BIO_UPTODATE, &bi->bi_flags);
                                if (!raid5_dec_bi_active_stripes(bi)) {
                                        bi->bi_next = *return_bi;
                                        *return_bi = bi;
                                }
                                bi = nextbi;
                        }
                }
                if (bitmap_end)
                        bitmap_endwrite(conf->mddev->bitmap, sh->sector,
                                        STRIPE_SECTORS, 0, 0);
                /* If we were in the middle of a write the parity block might
                 * still be locked - so just clear all R5_LOCKED flags
                 */
                clear_bit(R5_LOCKED, &sh->dev[i].flags);
        }

        if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
                if (atomic_dec_and_test(&conf->pending_full_writes))
                        md_wakeup_thread(conf->mddev->thread);
}

static void
handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
                   struct stripe_head_state *s)
{
        int abort = 0;
        int i;

        clear_bit(STRIPE_SYNCING, &sh->state);
        s->syncing = 0;
        s->replacing = 0;
        /* There is nothing more to do for sync/check/repair.
         * Don't even need to abort as that is handled elsewhere
         * if needed, and not always wanted e.g. if there is a known
         * bad block here.
         * For recover/replace we need to record a bad block on all
         * non-sync devices, or abort the recovery
         */
        if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
                /* During recovery devices cannot be removed, so
                 * locking and refcounting of rdevs is not needed
                 */
                for (i = 0; i < conf->raid_disks; i++) {
                        struct md_rdev *rdev = conf->disks[i].rdev;
                        if (rdev
                            && !test_bit(Faulty, &rdev->flags)
                            && !test_bit(In_sync, &rdev->flags)
                            && !rdev_set_badblocks(rdev, sh->sector,
                                                   STRIPE_SECTORS, 0))
                                abort = 1;
                        rdev = conf->disks[i].replacement;
                        if (rdev
                            && !test_bit(Faulty, &rdev->flags)
                            && !test_bit(In_sync, &rdev->flags)
                            && !rdev_set_badblocks(rdev, sh->sector,
                                                   STRIPE_SECTORS, 0))
                                abort = 1;
                }
                if (abort)
                        conf->recovery_disabled =
                                conf->mddev->recovery_disabled;
        }
        md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
}

static int want_replace(struct stripe_head *sh, int disk_idx)
{
        struct md_rdev *rdev;
        int rv = 0;
        /* Doing recovery so rcu locking not required */
        rdev = sh->raid_conf->disks[disk_idx].replacement;
        if (rdev
            && !test_bit(Faulty, &rdev->flags)
            && !test_bit(In_sync, &rdev->flags)
            && (rdev->recovery_offset <= sh->sector
                || rdev->mddev->recovery_cp <= sh->sector))
                rv = 1;

        return rv;
}

/* fetch_block - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill to continue
 */
static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
                       int disk_idx, int disks)
{
        struct r5dev *dev = &sh->dev[disk_idx];
        struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
                                  &sh->dev[s->failed_num[1]] };

        /* is the data in this block needed, and can we get it? */
        if (!test_bit(R5_LOCKED, &dev->flags) &&
            !test_bit(R5_UPTODATE, &dev->flags) &&
            (dev->toread ||
             (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
             s->syncing || s->expanding ||
             (s->replacing && want_replace(sh, disk_idx)) ||
             (s->failed >= 1 && fdev[0]->toread) ||
             (s->failed >= 2 && fdev[1]->toread) ||
             (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
              !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
             (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
                /* we would like to get this block, possibly by computing it,
                 * otherwise read it if the backing disk is insync
                 */
                BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
                BUG_ON(test_bit(R5_Wantread, &dev->flags));
                if ((s->uptodate == disks - 1) &&
                    (s->failed && (disk_idx == s->failed_num[0] ||
                                   disk_idx == s->failed_num[1]))) {
                        /* the disk has failed and we're asked to fetch its
                         * data, so compute it instead
                         */
                        pr_debug("Computing stripe %llu block %d\n",
                                 (unsigned long long)sh->sector, disk_idx);
                        set_bit(STRIPE_COMPUTE_RUN, &sh->state);
                        set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
                        set_bit(R5_Wantcompute, &dev->flags);
                        sh->ops.target = disk_idx;
                        sh->ops.target2 = -1; /* no 2nd target */
                        s->req_compute = 1;
                        /* Careful: from this point on 'uptodate' is in the eye
                         * of raid_run_ops which services 'compute' operations
                         * before writes. R5_Wantcompute flags a block that will
                         * be R5_UPTODATE by the time it is needed for a
                         * subsequent operation.
                         */
                        s->uptodate++;
                        return 1;
                } else if (s->uptodate == disks-2 && s->failed >= 2) {
                        /* Computing 2-failure is *very* expensive; only
                         * do it if failed >= 2
                         */
                        int other;
                        for (other = disks; other--; ) {
                                if (other == disk_idx)
                                        continue;
                                if (!test_bit(R5_UPTODATE,
                                              &sh->dev[other].flags))
                                        break;
                        }
                        BUG_ON(other < 0);
                        pr_debug("Computing stripe %llu blocks %d,%d\n",
                                 (unsigned long long)sh->sector,
                                 disk_idx, other);
                        set_bit(STRIPE_COMPUTE_RUN, &sh->state);
                        set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
                        set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
                        set_bit(R5_Wantcompute, &sh->dev[other].flags);
                        sh->ops.target = disk_idx;
                        sh->ops.target2 = other;
                        s->uptodate += 2;
                        s->req_compute = 1;
                        return 1;
                } else if (test_bit(R5_Insync, &dev->flags)) {
                        set_bit(R5_LOCKED, &dev->flags);
                        set_bit(R5_Wantread, &dev->flags);
                        s->locked++;
                        pr_debug("Reading block %d (sync=%d)\n",
                                 disk_idx, s->syncing);
                }
        }

        return 0;
}
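
/*
 * The s->uptodate accounting above decides how a block is recovered: with
 * disks-1 blocks already available a single missing block can be computed
 * in one XOR/syndrome pass, the disks-2 case with failed >= 2 schedules the
 * much more expensive two-block RAID6 recovery, and anything else falls
 * back to a plain read from an in-sync device.
 */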

/**
 * handle_stripe_fill - read or compute data to satisfy pending requests.
 */
static void handle_stripe_fill(struct stripe_head *sh,
                               struct stripe_head_state *s,
                               int disks)
{
        int i;

        /* look for blocks to read/compute, skip this if a compute
         * is already in flight, or if the stripe contents are in the
         * midst of changing due to a write
         */
        if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
            !sh->reconstruct_state)
                for (i = disks; i--; )
                        if (fetch_block(sh, s, i, disks))
                                break;
        set_bit(STRIPE_HANDLE, &sh->state);
}
  2430. /* handle_stripe_clean_event
  2431. * any written block on an uptodate or failed drive can be returned.
  2432. * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
  2433. * never LOCKED, so we don't need to test 'failed' directly.
  2434. */
  2435. static void handle_stripe_clean_event(struct r5conf *conf,
  2436. struct stripe_head *sh, int disks, struct bio **return_bi)
  2437. {
  2438. int i;
  2439. struct r5dev *dev;
  2440. for (i = disks; i--; )
  2441. if (sh->dev[i].written) {
  2442. dev = &sh->dev[i];
  2443. if (!test_bit(R5_LOCKED, &dev->flags) &&
  2444. (test_bit(R5_UPTODATE, &dev->flags) ||
  2445. test_bit(R5_Discard, &dev->flags))) {
  2446. /* We can return any write requests */
  2447. struct bio *wbi, *wbi2;
  2448. pr_debug("Return write for disc %d\n", i);
  2449. if (test_and_clear_bit(R5_Discard, &dev->flags))
  2450. clear_bit(R5_UPTODATE, &dev->flags);
  2451. wbi = dev->written;
  2452. dev->written = NULL;
  2453. while (wbi && wbi->bi_sector <
  2454. dev->sector + STRIPE_SECTORS) {
  2455. wbi2 = r5_next_bio(wbi, dev->sector);
  2456. if (!raid5_dec_bi_active_stripes(wbi)) {
  2457. md_write_end(conf->mddev);
  2458. wbi->bi_next = *return_bi;
  2459. *return_bi = wbi;
  2460. }
  2461. wbi = wbi2;
  2462. }
  2463. bitmap_endwrite(conf->mddev->bitmap, sh->sector,
  2464. STRIPE_SECTORS,
  2465. !test_bit(STRIPE_DEGRADED, &sh->state),
  2466. 0);
  2467. }
  2468. } else if (test_bit(R5_Discard, &sh->dev[i].flags))
  2469. clear_bit(R5_Discard, &sh->dev[i].flags);
  2470. if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
  2471. if (atomic_dec_and_test(&conf->pending_full_writes))
  2472. md_wakeup_thread(conf->mddev->thread);
  2473. }
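/* handle_stripe_dirtying
 * Decide between read-modify-write and reconstruct-write for a stripe
 * with pending writes, schedule any pre-reads that are needed first,
 * and start the write once enough data is in the cache.
 */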
  2474. static void handle_stripe_dirtying(struct r5conf *conf,
  2475. struct stripe_head *sh,
  2476. struct stripe_head_state *s,
  2477. int disks)
  2478. {
  2479. int rmw = 0, rcw = 0, i;
  2480. sector_t recovery_cp = conf->mddev->recovery_cp;
  2481. /* RAID6 requires 'rcw' in current implementation.
  2482. * Otherwise, check whether resync is now happening or should start.
  2483. * If yes, then the array is dirty (after unclean shutdown or
  2484. * initial creation), so parity in some stripes might be inconsistent.
  2485. * In this case, we need to always do reconstruct-write, to ensure
  2486. * that in case of drive failure or read-error correction, we
  2487. * generate correct data from the parity.
  2488. */
  2489. if (conf->max_degraded == 2 ||
  2490. (recovery_cp < MaxSector && sh->sector >= recovery_cp)) {
  2491. /* Calculate the real rcw later - for now make it
  2492. * look like rcw is cheaper
  2493. */
  2494. rcw = 1; rmw = 2;
  2495. pr_debug("force RCW max_degraded=%u, recovery_cp=%llu sh->sector=%llu\n",
  2496. conf->max_degraded, (unsigned long long)recovery_cp,
  2497. (unsigned long long)sh->sector);
  2498. } else for (i = disks; i--; ) {
  2499. /* would I have to read this buffer for read_modify_write */
  2500. struct r5dev *dev = &sh->dev[i];
  2501. if ((dev->towrite || i == sh->pd_idx) &&
  2502. !test_bit(R5_LOCKED, &dev->flags) &&
  2503. !(test_bit(R5_UPTODATE, &dev->flags) ||
  2504. test_bit(R5_Wantcompute, &dev->flags))) {
  2505. if (test_bit(R5_Insync, &dev->flags))
  2506. rmw++;
  2507. else
  2508. rmw += 2*disks; /* cannot read it */
  2509. }
  2510. /* Would I have to read this buffer for reconstruct_write */
  2511. if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
  2512. !test_bit(R5_LOCKED, &dev->flags) &&
  2513. !(test_bit(R5_UPTODATE, &dev->flags) ||
  2514. test_bit(R5_Wantcompute, &dev->flags))) {
  2515. if (test_bit(R5_Insync, &dev->flags)) rcw++;
  2516. else
  2517. rcw += 2*disks;
  2518. }
  2519. }
  2520. pr_debug("for sector %llu, rmw=%d rcw=%d\n",
  2521. (unsigned long long)sh->sector, rmw, rcw);
  2522. set_bit(STRIPE_HANDLE, &sh->state);
  2523. if (rmw < rcw && rmw > 0) {
  2524. /* prefer read-modify-write, but need to get some data */
  2525. blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d",
  2526. (unsigned long long)sh->sector, rmw);
  2527. for (i = disks; i--; ) {
  2528. struct r5dev *dev = &sh->dev[i];
  2529. if ((dev->towrite || i == sh->pd_idx) &&
  2530. !test_bit(R5_LOCKED, &dev->flags) &&
  2531. !(test_bit(R5_UPTODATE, &dev->flags) ||
  2532. test_bit(R5_Wantcompute, &dev->flags)) &&
  2533. test_bit(R5_Insync, &dev->flags)) {
if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
  2536. pr_debug("Read_old block "
  2537. "%d for r-m-w\n", i);
  2538. set_bit(R5_LOCKED, &dev->flags);
  2539. set_bit(R5_Wantread, &dev->flags);
  2540. s->locked++;
  2541. } else {
  2542. set_bit(STRIPE_DELAYED, &sh->state);
  2543. set_bit(STRIPE_HANDLE, &sh->state);
  2544. }
  2545. }
  2546. }
  2547. }
  2548. if (rcw <= rmw && rcw > 0) {
  2549. /* want reconstruct write, but need to get some data */
int qread = 0;
  2551. rcw = 0;
  2552. for (i = disks; i--; ) {
  2553. struct r5dev *dev = &sh->dev[i];
  2554. if (!test_bit(R5_OVERWRITE, &dev->flags) &&
  2555. i != sh->pd_idx && i != sh->qd_idx &&
  2556. !test_bit(R5_LOCKED, &dev->flags) &&
  2557. !(test_bit(R5_UPTODATE, &dev->flags) ||
  2558. test_bit(R5_Wantcompute, &dev->flags))) {
  2559. rcw++;
  2560. if (!test_bit(R5_Insync, &dev->flags))
  2561. continue; /* it's a failed drive */
if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
  2564. pr_debug("Read_old block "
  2565. "%d for Reconstruct\n", i);
  2566. set_bit(R5_LOCKED, &dev->flags);
  2567. set_bit(R5_Wantread, &dev->flags);
  2568. s->locked++;
  2569. qread++;
  2570. } else {
  2571. set_bit(STRIPE_DELAYED, &sh->state);
  2572. set_bit(STRIPE_HANDLE, &sh->state);
  2573. }
  2574. }
  2575. }
  2576. if (rcw)
  2577. blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
  2578. (unsigned long long)sh->sector,
  2579. rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
  2580. }
  2581. /* now if nothing is locked, and if we have enough data,
  2582. * we can start a write request
  2583. */
  2584. /* since handle_stripe can be called at any time we need to handle the
  2585. * case where a compute block operation has been submitted and then a
  2586. * subsequent call wants to start a write request. raid_run_ops only
  2587. * handles the case where compute block and reconstruct are requested
  2588. * simultaneously. If this is not the case then new writes need to be
  2589. * held off until the compute completes.
  2590. */
  2591. if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
  2592. (s->locked == 0 && (rcw == 0 || rmw == 0) &&
  2593. !test_bit(STRIPE_BIT_DELAY, &sh->state)))
  2594. schedule_reconstruction(sh, s, rcw == 0, 0);
  2595. }
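/* handle_parity_checks5
 * State machine that verifies the parity block of a RAID5 stripe and,
 * unless MD_RECOVERY_CHECK is set, recomputes and rewrites it when the
 * check fails.
 */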
  2596. static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
  2597. struct stripe_head_state *s, int disks)
  2598. {
  2599. struct r5dev *dev = NULL;
  2600. set_bit(STRIPE_HANDLE, &sh->state);
  2601. switch (sh->check_state) {
  2602. case check_state_idle:
  2603. /* start a new check operation if there are no failures */
  2604. if (s->failed == 0) {
  2605. BUG_ON(s->uptodate != disks);
  2606. sh->check_state = check_state_run;
  2607. set_bit(STRIPE_OP_CHECK, &s->ops_request);
  2608. clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
  2609. s->uptodate--;
  2610. break;
  2611. }
  2612. dev = &sh->dev[s->failed_num[0]];
  2613. /* fall through */
  2614. case check_state_compute_result:
  2615. sh->check_state = check_state_idle;
  2616. if (!dev)
  2617. dev = &sh->dev[sh->pd_idx];
  2618. /* check that a write has not made the stripe insync */
  2619. if (test_bit(STRIPE_INSYNC, &sh->state))
  2620. break;
  2621. /* either failed parity check, or recovery is happening */
  2622. BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
  2623. BUG_ON(s->uptodate != disks);
  2624. set_bit(R5_LOCKED, &dev->flags);
  2625. s->locked++;
  2626. set_bit(R5_Wantwrite, &dev->flags);
  2627. clear_bit(STRIPE_DEGRADED, &sh->state);
  2628. set_bit(STRIPE_INSYNC, &sh->state);
  2629. break;
  2630. case check_state_run:
  2631. break; /* we will be called again upon completion */
  2632. case check_state_check_result:
  2633. sh->check_state = check_state_idle;
  2634. /* if a failure occurred during the check operation, leave
  2635. * STRIPE_INSYNC not set and let the stripe be handled again
  2636. */
  2637. if (s->failed)
  2638. break;
  2639. /* handle a successful check operation, if parity is correct
  2640. * we are done. Otherwise update the mismatch count and repair
  2641. * parity if !MD_RECOVERY_CHECK
  2642. */
  2643. if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
  2644. /* parity is correct (on disc,
  2645. * not in buffer any more)
  2646. */
  2647. set_bit(STRIPE_INSYNC, &sh->state);
  2648. else {
  2649. atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
  2650. if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
  2651. /* don't try to repair!! */
  2652. set_bit(STRIPE_INSYNC, &sh->state);
  2653. else {
  2654. sh->check_state = check_state_compute_run;
  2655. set_bit(STRIPE_COMPUTE_RUN, &sh->state);
  2656. set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
  2657. set_bit(R5_Wantcompute,
  2658. &sh->dev[sh->pd_idx].flags);
  2659. sh->ops.target = sh->pd_idx;
  2660. sh->ops.target2 = -1;
  2661. s->uptodate++;
  2662. }
  2663. }
  2664. break;
  2665. case check_state_compute_run:
  2666. break;
  2667. default:
  2668. printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
  2669. __func__, sh->check_state,
  2670. (unsigned long long) sh->sector);
  2671. BUG();
  2672. }
  2673. }
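/* handle_parity_checks6
 * RAID6 counterpart of the above: P and Q may each need checking and
 * recomputation, possibly with up to two failed devices to write back.
 */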
  2674. static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
  2675. struct stripe_head_state *s,
  2676. int disks)
  2677. {
  2678. int pd_idx = sh->pd_idx;
  2679. int qd_idx = sh->qd_idx;
  2680. struct r5dev *dev;
  2681. set_bit(STRIPE_HANDLE, &sh->state);
  2682. BUG_ON(s->failed > 2);
  2683. /* Want to check and possibly repair P and Q.
  2684. * However there could be one 'failed' device, in which
  2685. * case we can only check one of them, possibly using the
  2686. * other to generate missing data
  2687. */
  2688. switch (sh->check_state) {
  2689. case check_state_idle:
  2690. /* start a new check operation if there are < 2 failures */
  2691. if (s->failed == s->q_failed) {
  2692. /* The only possible failed device holds Q, so it
  2693. * makes sense to check P (If anything else were failed,
  2694. * we would have used P to recreate it).
  2695. */
  2696. sh->check_state = check_state_run;
  2697. }
  2698. if (!s->q_failed && s->failed < 2) {
  2699. /* Q is not failed, and we didn't use it to generate
  2700. * anything, so it makes sense to check it
  2701. */
  2702. if (sh->check_state == check_state_run)
  2703. sh->check_state = check_state_run_pq;
  2704. else
  2705. sh->check_state = check_state_run_q;
  2706. }
  2707. /* discard potentially stale zero_sum_result */
  2708. sh->ops.zero_sum_result = 0;
  2709. if (sh->check_state == check_state_run) {
  2710. /* async_xor_zero_sum destroys the contents of P */
  2711. clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
  2712. s->uptodate--;
  2713. }
  2714. if (sh->check_state >= check_state_run &&
  2715. sh->check_state <= check_state_run_pq) {
  2716. /* async_syndrome_zero_sum preserves P and Q, so
  2717. * no need to mark them !uptodate here
  2718. */
  2719. set_bit(STRIPE_OP_CHECK, &s->ops_request);
  2720. break;
  2721. }
  2722. /* we have 2-disk failure */
  2723. BUG_ON(s->failed != 2);
  2724. /* fall through */
  2725. case check_state_compute_result:
  2726. sh->check_state = check_state_idle;
  2727. /* check that a write has not made the stripe insync */
  2728. if (test_bit(STRIPE_INSYNC, &sh->state))
  2729. break;
  2730. /* now write out any block on a failed drive,
  2731. * or P or Q if they were recomputed
  2732. */
  2733. BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
  2734. if (s->failed == 2) {
  2735. dev = &sh->dev[s->failed_num[1]];
  2736. s->locked++;
  2737. set_bit(R5_LOCKED, &dev->flags);
  2738. set_bit(R5_Wantwrite, &dev->flags);
  2739. }
  2740. if (s->failed >= 1) {
  2741. dev = &sh->dev[s->failed_num[0]];
  2742. s->locked++;
  2743. set_bit(R5_LOCKED, &dev->flags);
  2744. set_bit(R5_Wantwrite, &dev->flags);
  2745. }
  2746. if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
  2747. dev = &sh->dev[pd_idx];
  2748. s->locked++;
  2749. set_bit(R5_LOCKED, &dev->flags);
  2750. set_bit(R5_Wantwrite, &dev->flags);
  2751. }
  2752. if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
  2753. dev = &sh->dev[qd_idx];
  2754. s->locked++;
  2755. set_bit(R5_LOCKED, &dev->flags);
  2756. set_bit(R5_Wantwrite, &dev->flags);
  2757. }
  2758. clear_bit(STRIPE_DEGRADED, &sh->state);
  2759. set_bit(STRIPE_INSYNC, &sh->state);
  2760. break;
  2761. case check_state_run:
  2762. case check_state_run_q:
  2763. case check_state_run_pq:
  2764. break; /* we will be called again upon completion */
  2765. case check_state_check_result:
  2766. sh->check_state = check_state_idle;
  2767. /* handle a successful check operation, if parity is correct
  2768. * we are done. Otherwise update the mismatch count and repair
  2769. * parity if !MD_RECOVERY_CHECK
  2770. */
  2771. if (sh->ops.zero_sum_result == 0) {
  2772. /* both parities are correct */
  2773. if (!s->failed)
  2774. set_bit(STRIPE_INSYNC, &sh->state);
  2775. else {
  2776. /* in contrast to the raid5 case we can validate
  2777. * parity, but still have a failure to write
  2778. * back
  2779. */
  2780. sh->check_state = check_state_compute_result;
  2781. /* Returning at this point means that we may go
  2782. * off and bring p and/or q uptodate again so
  2783. * we make sure to check zero_sum_result again
  2784. * to verify if p or q need writeback
  2785. */
  2786. }
  2787. } else {
  2788. atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
  2789. if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
  2790. /* don't try to repair!! */
  2791. set_bit(STRIPE_INSYNC, &sh->state);
  2792. else {
  2793. int *target = &sh->ops.target;
  2794. sh->ops.target = -1;
  2795. sh->ops.target2 = -1;
  2796. sh->check_state = check_state_compute_run;
  2797. set_bit(STRIPE_COMPUTE_RUN, &sh->state);
  2798. set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
  2799. if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
  2800. set_bit(R5_Wantcompute,
  2801. &sh->dev[pd_idx].flags);
  2802. *target = pd_idx;
  2803. target = &sh->ops.target2;
  2804. s->uptodate++;
  2805. }
  2806. if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
  2807. set_bit(R5_Wantcompute,
  2808. &sh->dev[qd_idx].flags);
  2809. *target = qd_idx;
  2810. s->uptodate++;
  2811. }
  2812. }
  2813. }
  2814. break;
  2815. case check_state_compute_run:
  2816. break;
  2817. default:
  2818. printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
  2819. __func__, sh->check_state,
  2820. (unsigned long long) sh->sector);
  2821. BUG();
  2822. }
  2823. }
  2824. static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
  2825. {
  2826. int i;
  2827. /* We have read all the blocks in this stripe and now we need to
  2828. * copy some of them into a target stripe for expand.
  2829. */
  2830. struct dma_async_tx_descriptor *tx = NULL;
  2831. clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
  2832. for (i = 0; i < sh->disks; i++)
  2833. if (i != sh->pd_idx && i != sh->qd_idx) {
  2834. int dd_idx, j;
  2835. struct stripe_head *sh2;
  2836. struct async_submit_ctl submit;
  2837. sector_t bn = compute_blocknr(sh, i, 1);
  2838. sector_t s = raid5_compute_sector(conf, bn, 0,
  2839. &dd_idx, NULL);
  2840. sh2 = get_active_stripe(conf, s, 0, 1, 1);
  2841. if (sh2 == NULL)
  2842. /* so far only the early blocks of this stripe
  2843. * have been requested. When later blocks
  2844. * get requested, we will try again
  2845. */
  2846. continue;
  2847. if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
  2848. test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
  2849. /* must have already done this block */
  2850. release_stripe(sh2);
  2851. continue;
  2852. }
  2853. /* place all the copies on one channel */
  2854. init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
  2855. tx = async_memcpy(sh2->dev[dd_idx].page,
  2856. sh->dev[i].page, 0, 0, STRIPE_SIZE,
  2857. &submit);
  2858. set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
  2859. set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
  2860. for (j = 0; j < conf->raid_disks; j++)
  2861. if (j != sh2->pd_idx &&
  2862. j != sh2->qd_idx &&
  2863. !test_bit(R5_Expanded, &sh2->dev[j].flags))
  2864. break;
  2865. if (j == conf->raid_disks) {
  2866. set_bit(STRIPE_EXPAND_READY, &sh2->state);
  2867. set_bit(STRIPE_HANDLE, &sh2->state);
  2868. }
  2869. release_stripe(sh2);
  2870. }
  2871. /* done submitting copies, wait for them to complete */
  2872. async_tx_quiesce(&tx);
  2873. }
  2874. /*
  2875. * handle_stripe - do things to a stripe.
  2876. *
  2877. * We lock the stripe by setting STRIPE_ACTIVE and then examine the
  2878. * state of various bits to see what needs to be done.
  2879. * Possible results:
  2880. * return some read requests which now have data
  2881. * return some write requests which are safely on storage
  2882. * schedule a read on some buffers
  2883. * schedule a write of some buffers
  2884. * return confirmation of parity correctness
  2885. *
  2886. */
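/* analyse_stripe - summarise the state of every device in the stripe
 * into 'struct stripe_head_state' so handle_stripe can decide what to
 * do next; the rdevs are inspected under rcu_read_lock.
 */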
  2887. static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
  2888. {
  2889. struct r5conf *conf = sh->raid_conf;
  2890. int disks = sh->disks;
  2891. struct r5dev *dev;
  2892. int i;
  2893. int do_recovery = 0;
  2894. memset(s, 0, sizeof(*s));
  2895. s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
  2896. s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
  2897. s->failed_num[0] = -1;
  2898. s->failed_num[1] = -1;
  2899. /* Now to look around and see what can be done */
  2900. rcu_read_lock();
  2901. for (i=disks; i--; ) {
  2902. struct md_rdev *rdev;
  2903. sector_t first_bad;
  2904. int bad_sectors;
  2905. int is_bad = 0;
  2906. dev = &sh->dev[i];
  2907. pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
  2908. i, dev->flags,
  2909. dev->toread, dev->towrite, dev->written);
  2910. /* maybe we can reply to a read
  2911. *
  2912. * new wantfill requests are only permitted while
  2913. * ops_complete_biofill is guaranteed to be inactive
  2914. */
  2915. if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
  2916. !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
  2917. set_bit(R5_Wantfill, &dev->flags);
  2918. /* now count some things */
  2919. if (test_bit(R5_LOCKED, &dev->flags))
  2920. s->locked++;
  2921. if (test_bit(R5_UPTODATE, &dev->flags))
  2922. s->uptodate++;
  2923. if (test_bit(R5_Wantcompute, &dev->flags)) {
  2924. s->compute++;
  2925. BUG_ON(s->compute > 2);
  2926. }
  2927. if (test_bit(R5_Wantfill, &dev->flags))
  2928. s->to_fill++;
  2929. else if (dev->toread)
  2930. s->to_read++;
  2931. if (dev->towrite) {
  2932. s->to_write++;
  2933. if (!test_bit(R5_OVERWRITE, &dev->flags))
  2934. s->non_overwrite++;
  2935. }
  2936. if (dev->written)
  2937. s->written++;
  2938. /* Prefer to use the replacement for reads, but only
  2939. * if it is recovered enough and has no bad blocks.
  2940. */
  2941. rdev = rcu_dereference(conf->disks[i].replacement);
  2942. if (rdev && !test_bit(Faulty, &rdev->flags) &&
  2943. rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
  2944. !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
  2945. &first_bad, &bad_sectors))
  2946. set_bit(R5_ReadRepl, &dev->flags);
  2947. else {
  2948. if (rdev)
  2949. set_bit(R5_NeedReplace, &dev->flags);
  2950. rdev = rcu_dereference(conf->disks[i].rdev);
  2951. clear_bit(R5_ReadRepl, &dev->flags);
  2952. }
  2953. if (rdev && test_bit(Faulty, &rdev->flags))
  2954. rdev = NULL;
  2955. if (rdev) {
  2956. is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
  2957. &first_bad, &bad_sectors);
  2958. if (s->blocked_rdev == NULL
  2959. && (test_bit(Blocked, &rdev->flags)
  2960. || is_bad < 0)) {
  2961. if (is_bad < 0)
  2962. set_bit(BlockedBadBlocks,
  2963. &rdev->flags);
  2964. s->blocked_rdev = rdev;
  2965. atomic_inc(&rdev->nr_pending);
  2966. }
  2967. }
  2968. clear_bit(R5_Insync, &dev->flags);
  2969. if (!rdev)
  2970. /* Not in-sync */;
  2971. else if (is_bad) {
  2972. /* also not in-sync */
  2973. if (!test_bit(WriteErrorSeen, &rdev->flags) &&
  2974. test_bit(R5_UPTODATE, &dev->flags)) {
  2975. /* treat as in-sync, but with a read error
  2976. * which we can now try to correct
  2977. */
  2978. set_bit(R5_Insync, &dev->flags);
  2979. set_bit(R5_ReadError, &dev->flags);
  2980. }
  2981. } else if (test_bit(In_sync, &rdev->flags))
  2982. set_bit(R5_Insync, &dev->flags);
  2983. else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
  2984. /* in sync if before recovery_offset */
  2985. set_bit(R5_Insync, &dev->flags);
  2986. else if (test_bit(R5_UPTODATE, &dev->flags) &&
  2987. test_bit(R5_Expanded, &dev->flags))
  2988. /* If we've reshaped into here, we assume it is Insync.
  2989. * We will shortly update recovery_offset to make
  2990. * it official.
  2991. */
  2992. set_bit(R5_Insync, &dev->flags);
  2993. if (rdev && test_bit(R5_WriteError, &dev->flags)) {
/* This flag does not apply to '.replacement',
* only to '.rdev', so make sure to check that.
*/
  2996. struct md_rdev *rdev2 = rcu_dereference(
  2997. conf->disks[i].rdev);
  2998. if (rdev2 == rdev)
  2999. clear_bit(R5_Insync, &dev->flags);
  3000. if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
  3001. s->handle_bad_blocks = 1;
  3002. atomic_inc(&rdev2->nr_pending);
  3003. } else
  3004. clear_bit(R5_WriteError, &dev->flags);
  3005. }
  3006. if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
/* This flag does not apply to '.replacement',
* only to '.rdev', so make sure to check that.
*/
  3009. struct md_rdev *rdev2 = rcu_dereference(
  3010. conf->disks[i].rdev);
  3011. if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
  3012. s->handle_bad_blocks = 1;
  3013. atomic_inc(&rdev2->nr_pending);
  3014. } else
  3015. clear_bit(R5_MadeGood, &dev->flags);
  3016. }
  3017. if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
  3018. struct md_rdev *rdev2 = rcu_dereference(
  3019. conf->disks[i].replacement);
  3020. if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
  3021. s->handle_bad_blocks = 1;
  3022. atomic_inc(&rdev2->nr_pending);
  3023. } else
  3024. clear_bit(R5_MadeGoodRepl, &dev->flags);
  3025. }
  3026. if (!test_bit(R5_Insync, &dev->flags)) {
  3027. /* The ReadError flag will just be confusing now */
  3028. clear_bit(R5_ReadError, &dev->flags);
  3029. clear_bit(R5_ReWrite, &dev->flags);
  3030. }
  3031. if (test_bit(R5_ReadError, &dev->flags))
  3032. clear_bit(R5_Insync, &dev->flags);
  3033. if (!test_bit(R5_Insync, &dev->flags)) {
  3034. if (s->failed < 2)
  3035. s->failed_num[s->failed] = i;
  3036. s->failed++;
  3037. if (rdev && !test_bit(Faulty, &rdev->flags))
  3038. do_recovery = 1;
  3039. }
  3040. }
  3041. if (test_bit(STRIPE_SYNCING, &sh->state)) {
  3042. /* If there is a failed device being replaced,
  3043. * we must be recovering.
  3044. * else if we are after recovery_cp, we must be syncing
  3045. * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
  3046. * else we can only be replacing
  3047. * sync and recovery both need to read all devices, and so
  3048. * use the same flag.
  3049. */
  3050. if (do_recovery ||
  3051. sh->sector >= conf->mddev->recovery_cp ||
  3052. test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
  3053. s->syncing = 1;
  3054. else
  3055. s->replacing = 1;
  3056. }
  3057. rcu_read_unlock();
  3058. }
  3059. static void handle_stripe(struct stripe_head *sh)
  3060. {
  3061. struct stripe_head_state s;
  3062. struct r5conf *conf = sh->raid_conf;
  3063. int i;
  3064. int prexor;
  3065. int disks = sh->disks;
  3066. struct r5dev *pdev, *qdev;
  3067. clear_bit(STRIPE_HANDLE, &sh->state);
  3068. if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
  3069. /* already being handled, ensure it gets handled
  3070. * again when current action finishes */
  3071. set_bit(STRIPE_HANDLE, &sh->state);
  3072. return;
  3073. }
  3074. if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
  3075. set_bit(STRIPE_SYNCING, &sh->state);
  3076. clear_bit(STRIPE_INSYNC, &sh->state);
  3077. }
  3078. clear_bit(STRIPE_DELAYED, &sh->state);
  3079. pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
  3080. "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
  3081. (unsigned long long)sh->sector, sh->state,
  3082. atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
  3083. sh->check_state, sh->reconstruct_state);
  3084. analyse_stripe(sh, &s);
  3085. if (s.handle_bad_blocks) {
  3086. set_bit(STRIPE_HANDLE, &sh->state);
  3087. goto finish;
  3088. }
  3089. if (unlikely(s.blocked_rdev)) {
  3090. if (s.syncing || s.expanding || s.expanded ||
  3091. s.replacing || s.to_write || s.written) {
  3092. set_bit(STRIPE_HANDLE, &sh->state);
  3093. goto finish;
  3094. }
  3095. /* There is nothing for the blocked_rdev to block */
  3096. rdev_dec_pending(s.blocked_rdev, conf->mddev);
  3097. s.blocked_rdev = NULL;
  3098. }
  3099. if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
  3100. set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
  3101. set_bit(STRIPE_BIOFILL_RUN, &sh->state);
  3102. }
  3103. pr_debug("locked=%d uptodate=%d to_read=%d"
  3104. " to_write=%d failed=%d failed_num=%d,%d\n",
  3105. s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
  3106. s.failed_num[0], s.failed_num[1]);
  3107. /* check if the array has lost more than max_degraded devices and,
  3108. * if so, some requests might need to be failed.
  3109. */
  3110. if (s.failed > conf->max_degraded) {
  3111. sh->check_state = 0;
  3112. sh->reconstruct_state = 0;
  3113. if (s.to_read+s.to_write+s.written)
  3114. handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
  3115. if (s.syncing + s.replacing)
  3116. handle_failed_sync(conf, sh, &s);
  3117. }
  3118. /* Now we check to see if any write operations have recently
  3119. * completed
  3120. */
  3121. prexor = 0;
  3122. if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
  3123. prexor = 1;
  3124. if (sh->reconstruct_state == reconstruct_state_drain_result ||
  3125. sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
  3126. sh->reconstruct_state = reconstruct_state_idle;
  3127. /* All the 'written' buffers and the parity block are ready to
  3128. * be written back to disk
  3129. */
  3130. BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
  3131. !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags));
  3132. BUG_ON(sh->qd_idx >= 0 &&
  3133. !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) &&
  3134. !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags));
  3135. for (i = disks; i--; ) {
  3136. struct r5dev *dev = &sh->dev[i];
  3137. if (test_bit(R5_LOCKED, &dev->flags) &&
  3138. (i == sh->pd_idx || i == sh->qd_idx ||
  3139. dev->written)) {
  3140. pr_debug("Writing block %d\n", i);
  3141. set_bit(R5_Wantwrite, &dev->flags);
  3142. if (prexor)
  3143. continue;
  3144. if (!test_bit(R5_Insync, &dev->flags) ||
  3145. ((i == sh->pd_idx || i == sh->qd_idx) &&
  3146. s.failed == 0))
  3147. set_bit(STRIPE_INSYNC, &sh->state);
  3148. }
  3149. }
  3150. if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
  3151. s.dec_preread_active = 1;
  3152. }
  3153. /*
  3154. * might be able to return some write requests if the parity blocks
  3155. * are safe, or on a failed drive
  3156. */
  3157. pdev = &sh->dev[sh->pd_idx];
  3158. s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
  3159. || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
  3160. qdev = &sh->dev[sh->qd_idx];
  3161. s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
  3162. || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
  3163. || conf->level < 6;
  3164. if (s.written &&
  3165. (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
  3166. && !test_bit(R5_LOCKED, &pdev->flags)
  3167. && (test_bit(R5_UPTODATE, &pdev->flags) ||
  3168. test_bit(R5_Discard, &pdev->flags))))) &&
  3169. (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
  3170. && !test_bit(R5_LOCKED, &qdev->flags)
  3171. && (test_bit(R5_UPTODATE, &qdev->flags) ||
  3172. test_bit(R5_Discard, &qdev->flags))))))
  3173. handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
  3174. /* Now we might consider reading some blocks, either to check/generate
  3175. * parity, or to satisfy requests
  3176. * or to load a block that is being partially written.
  3177. */
  3178. if (s.to_read || s.non_overwrite
  3179. || (conf->level == 6 && s.to_write && s.failed)
  3180. || (s.syncing && (s.uptodate + s.compute < disks))
  3181. || s.replacing
  3182. || s.expanding)
  3183. handle_stripe_fill(sh, &s, disks);
  3184. /* Now to consider new write requests and what else, if anything
  3185. * should be read. We do not handle new writes when:
  3186. * 1/ A 'write' operation (copy+xor) is already in flight.
  3187. * 2/ A 'check' operation is in flight, as it may clobber the parity
  3188. * block.
  3189. */
  3190. if (s.to_write && !sh->reconstruct_state && !sh->check_state)
  3191. handle_stripe_dirtying(conf, sh, &s, disks);
  3192. /* maybe we need to check and possibly fix the parity for this stripe
  3193. * Any reads will already have been scheduled, so we just see if enough
  3194. * data is available. The parity check is held off while parity
  3195. * dependent operations are in flight.
  3196. */
  3197. if (sh->check_state ||
  3198. (s.syncing && s.locked == 0 &&
  3199. !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
  3200. !test_bit(STRIPE_INSYNC, &sh->state))) {
  3201. if (conf->level == 6)
  3202. handle_parity_checks6(conf, sh, &s, disks);
  3203. else
  3204. handle_parity_checks5(conf, sh, &s, disks);
  3205. }
  3206. if (s.replacing && s.locked == 0
  3207. && !test_bit(STRIPE_INSYNC, &sh->state)) {
  3208. /* Write out to replacement devices where possible */
  3209. for (i = 0; i < conf->raid_disks; i++)
  3210. if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
  3211. test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
  3212. set_bit(R5_WantReplace, &sh->dev[i].flags);
  3213. set_bit(R5_LOCKED, &sh->dev[i].flags);
  3214. s.locked++;
  3215. }
  3216. set_bit(STRIPE_INSYNC, &sh->state);
  3217. }
  3218. if ((s.syncing || s.replacing) && s.locked == 0 &&
  3219. test_bit(STRIPE_INSYNC, &sh->state)) {
  3220. md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
  3221. clear_bit(STRIPE_SYNCING, &sh->state);
  3222. }
  3223. /* If the failed drives are just a ReadError, then we might need
  3224. * to progress the repair/check process
  3225. */
  3226. if (s.failed <= conf->max_degraded && !conf->mddev->ro)
  3227. for (i = 0; i < s.failed; i++) {
  3228. struct r5dev *dev = &sh->dev[s.failed_num[i]];
  3229. if (test_bit(R5_ReadError, &dev->flags)
  3230. && !test_bit(R5_LOCKED, &dev->flags)
  3231. && test_bit(R5_UPTODATE, &dev->flags)
  3232. ) {
  3233. if (!test_bit(R5_ReWrite, &dev->flags)) {
  3234. set_bit(R5_Wantwrite, &dev->flags);
  3235. set_bit(R5_ReWrite, &dev->flags);
  3236. set_bit(R5_LOCKED, &dev->flags);
  3237. s.locked++;
  3238. } else {
  3239. /* let's read it back */
  3240. set_bit(R5_Wantread, &dev->flags);
  3241. set_bit(R5_LOCKED, &dev->flags);
  3242. s.locked++;
  3243. }
  3244. }
  3245. }
  3246. /* Finish reconstruct operations initiated by the expansion process */
  3247. if (sh->reconstruct_state == reconstruct_state_result) {
  3248. struct stripe_head *sh_src
  3249. = get_active_stripe(conf, sh->sector, 1, 1, 1);
  3250. if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
  3251. /* sh cannot be written until sh_src has been read.
  3252. * so arrange for sh to be delayed a little
  3253. */
  3254. set_bit(STRIPE_DELAYED, &sh->state);
  3255. set_bit(STRIPE_HANDLE, &sh->state);
  3256. if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
  3257. &sh_src->state))
  3258. atomic_inc(&conf->preread_active_stripes);
  3259. release_stripe(sh_src);
  3260. goto finish;
  3261. }
  3262. if (sh_src)
  3263. release_stripe(sh_src);
  3264. sh->reconstruct_state = reconstruct_state_idle;
  3265. clear_bit(STRIPE_EXPANDING, &sh->state);
  3266. for (i = conf->raid_disks; i--; ) {
  3267. set_bit(R5_Wantwrite, &sh->dev[i].flags);
  3268. set_bit(R5_LOCKED, &sh->dev[i].flags);
  3269. s.locked++;
  3270. }
  3271. }
  3272. if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
  3273. !sh->reconstruct_state) {
  3274. /* Need to write out all blocks after computing parity */
  3275. sh->disks = conf->raid_disks;
  3276. stripe_set_idx(sh->sector, conf, 0, sh);
  3277. schedule_reconstruction(sh, &s, 1, 1);
  3278. } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
  3279. clear_bit(STRIPE_EXPAND_READY, &sh->state);
  3280. atomic_dec(&conf->reshape_stripes);
  3281. wake_up(&conf->wait_for_overlap);
  3282. md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
  3283. }
  3284. if (s.expanding && s.locked == 0 &&
  3285. !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
  3286. handle_stripe_expansion(conf, sh);
  3287. finish:
  3288. /* wait for this device to become unblocked */
  3289. if (unlikely(s.blocked_rdev)) {
  3290. if (conf->mddev->external)
  3291. md_wait_for_blocked_rdev(s.blocked_rdev,
  3292. conf->mddev);
  3293. else
  3294. /* Internal metadata will immediately
  3295. * be written by raid5d, so we don't
  3296. * need to wait here.
  3297. */
  3298. rdev_dec_pending(s.blocked_rdev,
  3299. conf->mddev);
  3300. }
  3301. if (s.handle_bad_blocks)
  3302. for (i = disks; i--; ) {
  3303. struct md_rdev *rdev;
  3304. struct r5dev *dev = &sh->dev[i];
  3305. if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
  3306. /* We own a safe reference to the rdev */
  3307. rdev = conf->disks[i].rdev;
  3308. if (!rdev_set_badblocks(rdev, sh->sector,
  3309. STRIPE_SECTORS, 0))
  3310. md_error(conf->mddev, rdev);
  3311. rdev_dec_pending(rdev, conf->mddev);
  3312. }
  3313. if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
  3314. rdev = conf->disks[i].rdev;
  3315. rdev_clear_badblocks(rdev, sh->sector,
  3316. STRIPE_SECTORS, 0);
  3317. rdev_dec_pending(rdev, conf->mddev);
  3318. }
  3319. if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
  3320. rdev = conf->disks[i].replacement;
  3321. if (!rdev)
/* rdev has been moved down */
  3323. rdev = conf->disks[i].rdev;
  3324. rdev_clear_badblocks(rdev, sh->sector,
  3325. STRIPE_SECTORS, 0);
  3326. rdev_dec_pending(rdev, conf->mddev);
  3327. }
  3328. }
  3329. if (s.ops_request)
  3330. raid_run_ops(sh, s.ops_request);
  3331. ops_run_io(sh, &s);
  3332. if (s.dec_preread_active) {
  3333. /* We delay this until after ops_run_io so that if make_request
  3334. * is waiting on a flush, it won't continue until the writes
  3335. * have actually been submitted.
  3336. */
  3337. atomic_dec(&conf->preread_active_stripes);
  3338. if (atomic_read(&conf->preread_active_stripes) <
  3339. IO_THRESHOLD)
  3340. md_wakeup_thread(conf->mddev->thread);
  3341. }
  3342. return_io(s.return_bi);
  3343. clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
  3344. }
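/* If preread activity is below IO_THRESHOLD, promote all stripes on
 * the delayed list to the hold list and mark them preread-active.
 */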
  3345. static void raid5_activate_delayed(struct r5conf *conf)
  3346. {
  3347. if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
  3348. while (!list_empty(&conf->delayed_list)) {
  3349. struct list_head *l = conf->delayed_list.next;
  3350. struct stripe_head *sh;
  3351. sh = list_entry(l, struct stripe_head, lru);
  3352. list_del_init(l);
  3353. clear_bit(STRIPE_DELAYED, &sh->state);
  3354. if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
  3355. atomic_inc(&conf->preread_active_stripes);
  3356. list_add_tail(&sh->lru, &conf->hold_list);
  3357. }
  3358. }
  3359. }
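/* Re-release all stripes that were parked on bitmap_list while they
 * waited for their bitmap updates to be written.
 */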
  3360. static void activate_bit_delay(struct r5conf *conf)
  3361. {
  3362. /* device_lock is held */
  3363. struct list_head head;
  3364. list_add(&head, &conf->bitmap_list);
  3365. list_del_init(&conf->bitmap_list);
  3366. while (!list_empty(&head)) {
  3367. struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
  3368. list_del_init(&sh->lru);
  3369. atomic_inc(&sh->count);
  3370. __release_stripe(conf, sh);
  3371. }
  3372. }
  3373. int md_raid5_congested(struct mddev *mddev, int bits)
  3374. {
  3375. struct r5conf *conf = mddev->private;
  3376. /* No difference between reads and writes. Just check
  3377. * how busy the stripe_cache is
  3378. */
  3379. if (conf->inactive_blocked)
  3380. return 1;
  3381. if (conf->quiesce)
  3382. return 1;
  3383. if (list_empty_careful(&conf->inactive_list))
  3384. return 1;
  3385. return 0;
  3386. }
  3387. EXPORT_SYMBOL_GPL(md_raid5_congested);
  3388. static int raid5_congested(void *data, int bits)
  3389. {
  3390. struct mddev *mddev = data;
  3391. return mddev_congested(mddev, bits) ||
  3392. md_raid5_congested(mddev, bits);
  3393. }
  3394. /* We want read requests to align with chunks where possible,
  3395. * but write requests don't need to.
  3396. */
  3397. static int raid5_mergeable_bvec(struct request_queue *q,
  3398. struct bvec_merge_data *bvm,
  3399. struct bio_vec *biovec)
  3400. {
  3401. struct mddev *mddev = q->queuedata;
  3402. sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
  3403. int max;
  3404. unsigned int chunk_sectors = mddev->chunk_sectors;
  3405. unsigned int bio_sectors = bvm->bi_size >> 9;
  3406. if ((bvm->bi_rw & 1) == WRITE)
  3407. return biovec->bv_len; /* always allow writes to be mergeable */
  3408. if (mddev->new_chunk_sectors < mddev->chunk_sectors)
  3409. chunk_sectors = mddev->new_chunk_sectors;
  3410. max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
if (max < 0)
	max = 0;
  3412. if (max <= biovec->bv_len && bio_sectors == 0)
  3413. return biovec->bv_len;
  3414. else
  3415. return max;
  3416. }
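/* Does this bio lie entirely within one chunk (using the smaller of
 * the old and new chunk sizes while a reshape may be pending)?
 */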
  3417. static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
  3418. {
  3419. sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
  3420. unsigned int chunk_sectors = mddev->chunk_sectors;
  3421. unsigned int bio_sectors = bio_sectors(bio);
  3422. if (mddev->new_chunk_sectors < mddev->chunk_sectors)
  3423. chunk_sectors = mddev->new_chunk_sectors;
  3424. return chunk_sectors >=
  3425. ((sector & (chunk_sectors - 1)) + bio_sectors);
  3426. }
  3427. /*
* add bio to the retry LIFO (in O(1) ... we are in interrupt),
* to be sampled later by raid5d.
  3430. */
  3431. static void add_bio_to_retry(struct bio *bi,struct r5conf *conf)
  3432. {
  3433. unsigned long flags;
  3434. spin_lock_irqsave(&conf->device_lock, flags);
  3435. bi->bi_next = conf->retry_read_aligned_list;
  3436. conf->retry_read_aligned_list = bi;
  3437. spin_unlock_irqrestore(&conf->device_lock, flags);
  3438. md_wakeup_thread(conf->mddev->thread);
  3439. }
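/* Fetch the next aligned read to retry: first any partially processed
 * bio, otherwise the head of the retry list.
 */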
  3440. static struct bio *remove_bio_from_retry(struct r5conf *conf)
  3441. {
  3442. struct bio *bi;
  3443. bi = conf->retry_read_aligned;
  3444. if (bi) {
  3445. conf->retry_read_aligned = NULL;
  3446. return bi;
  3447. }
  3448. bi = conf->retry_read_aligned_list;
if (bi) {
  3450. conf->retry_read_aligned_list = bi->bi_next;
  3451. bi->bi_next = NULL;
/*
* this sets the active stripe count to 1 and the processed
* stripe count to zero (upper 8 bits)
*/
  3456. raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
  3457. }
  3458. return bi;
  3459. }
  3460. /*
  3461. * The "raid5_align_endio" should check if the read succeeded and if it
  3462. * did, call bio_endio on the original bio (having bio_put the new bio
  3463. * first).
* If the read failed, add the original bio back to the retry list.
  3465. */
  3466. static void raid5_align_endio(struct bio *bi, int error)
  3467. {
  3468. struct bio* raid_bi = bi->bi_private;
  3469. struct mddev *mddev;
  3470. struct r5conf *conf;
  3471. int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
  3472. struct md_rdev *rdev;
  3473. bio_put(bi);
  3474. rdev = (void*)raid_bi->bi_next;
  3475. raid_bi->bi_next = NULL;
  3476. mddev = rdev->mddev;
  3477. conf = mddev->private;
  3478. rdev_dec_pending(rdev, conf->mddev);
  3479. if (!error && uptodate) {
  3480. bio_endio(raid_bi, 0);
  3481. if (atomic_dec_and_test(&conf->active_aligned_reads))
  3482. wake_up(&conf->wait_for_stripe);
  3483. return;
  3484. }
  3485. pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
  3486. add_bio_to_retry(raid_bi, conf);
  3487. }
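/* Check that a cloned bio is small enough to be submitted to the
 * target device's queue without being split.
 */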
  3488. static int bio_fits_rdev(struct bio *bi)
  3489. {
  3490. struct request_queue *q = bdev_get_queue(bi->bi_bdev);
  3491. if (bio_sectors(bi) > queue_max_sectors(q))
  3492. return 0;
  3493. blk_recount_segments(q, bi);
  3494. if (bi->bi_phys_segments > queue_max_segments(q))
  3495. return 0;
  3496. if (q->merge_bvec_fn)
/* it's too hard to apply the merge_bvec_fn at this stage,
* so just give up
*/
  3500. return 0;
  3501. return 1;
  3502. }
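/* chunk_aligned_read
 * For a read that fits inside a single chunk, bypass the stripe cache
 * and send a cloned bio directly to the in-sync device that holds the
 * data. Returns 1 if the read was dispatched, 0 if the caller must
 * fall back to the stripe-cache path.
 */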
  3503. static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
  3504. {
  3505. struct r5conf *conf = mddev->private;
  3506. int dd_idx;
  3507. struct bio* align_bi;
  3508. struct md_rdev *rdev;
  3509. sector_t end_sector;
  3510. if (!in_chunk_boundary(mddev, raid_bio)) {
  3511. pr_debug("chunk_aligned_read : non aligned\n");
  3512. return 0;
  3513. }
  3514. /*
  3515. * use bio_clone_mddev to make a copy of the bio
  3516. */
  3517. align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
  3518. if (!align_bi)
  3519. return 0;
  3520. /*
  3521. * set bi_end_io to a new function, and set bi_private to the
  3522. * original bio.
  3523. */
  3524. align_bi->bi_end_io = raid5_align_endio;
  3525. align_bi->bi_private = raid_bio;
  3526. /*
  3527. * compute position
  3528. */
  3529. align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
  3530. 0,
  3531. &dd_idx, NULL);
  3532. end_sector = bio_end_sector(align_bi);
  3533. rcu_read_lock();
  3534. rdev = rcu_dereference(conf->disks[dd_idx].replacement);
  3535. if (!rdev || test_bit(Faulty, &rdev->flags) ||
  3536. rdev->recovery_offset < end_sector) {
  3537. rdev = rcu_dereference(conf->disks[dd_idx].rdev);
  3538. if (rdev &&
  3539. (test_bit(Faulty, &rdev->flags) ||
  3540. !(test_bit(In_sync, &rdev->flags) ||
  3541. rdev->recovery_offset >= end_sector)))
  3542. rdev = NULL;
  3543. }
  3544. if (rdev) {
  3545. sector_t first_bad;
  3546. int bad_sectors;
  3547. atomic_inc(&rdev->nr_pending);
  3548. rcu_read_unlock();
  3549. raid_bio->bi_next = (void*)rdev;
  3550. align_bi->bi_bdev = rdev->bdev;
  3551. align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
  3552. if (!bio_fits_rdev(align_bi) ||
  3553. is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
  3554. &first_bad, &bad_sectors)) {
  3555. /* too big in some way, or has a known bad block */
  3556. bio_put(align_bi);
  3557. rdev_dec_pending(rdev, mddev);
  3558. return 0;
  3559. }
  3560. /* No reshape active, so we can trust rdev->data_offset */
  3561. align_bi->bi_sector += rdev->data_offset;
  3562. spin_lock_irq(&conf->device_lock);
  3563. wait_event_lock_irq(conf->wait_for_stripe,
  3564. conf->quiesce == 0,
  3565. conf->device_lock);
  3566. atomic_inc(&conf->active_aligned_reads);
  3567. spin_unlock_irq(&conf->device_lock);
  3568. trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
  3569. align_bi, disk_devt(mddev->gendisk),
  3570. raid_bio->bi_sector);
  3571. generic_make_request(align_bi);
  3572. return 1;
  3573. } else {
  3574. rcu_read_unlock();
  3575. bio_put(align_bi);
  3576. return 0;
  3577. }
  3578. }
  3579. /* __get_priority_stripe - get the next stripe to process
  3580. *
  3581. * Full stripe writes are allowed to pass preread active stripes up until
  3582. * the bypass_threshold is exceeded. In general the bypass_count
  3583. * increments when the handle_list is handled before the hold_list; however, it
* will not be incremented when STRIPE_IO_STARTED is sampled as set, signifying a
* stripe with in-flight i/o. The bypass_count will be reset when the
  3586. * head of the hold_list has changed, i.e. the head was promoted to the
  3587. * handle_list.
  3588. */
  3589. static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
  3590. {
  3591. struct stripe_head *sh;
  3592. pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
  3593. __func__,
  3594. list_empty(&conf->handle_list) ? "empty" : "busy",
  3595. list_empty(&conf->hold_list) ? "empty" : "busy",
  3596. atomic_read(&conf->pending_full_writes), conf->bypass_count);
  3597. if (!list_empty(&conf->handle_list)) {
  3598. sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
  3599. if (list_empty(&conf->hold_list))
  3600. conf->bypass_count = 0;
  3601. else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
  3602. if (conf->hold_list.next == conf->last_hold)
  3603. conf->bypass_count++;
  3604. else {
  3605. conf->last_hold = conf->hold_list.next;
  3606. conf->bypass_count -= conf->bypass_threshold;
  3607. if (conf->bypass_count < 0)
  3608. conf->bypass_count = 0;
  3609. }
  3610. }
  3611. } else if (!list_empty(&conf->hold_list) &&
  3612. ((conf->bypass_threshold &&
  3613. conf->bypass_count > conf->bypass_threshold) ||
  3614. atomic_read(&conf->pending_full_writes) == 0)) {
  3615. sh = list_entry(conf->hold_list.next,
  3616. typeof(*sh), lru);
  3617. conf->bypass_count -= conf->bypass_threshold;
  3618. if (conf->bypass_count < 0)
  3619. conf->bypass_count = 0;
  3620. } else
  3621. return NULL;
  3622. list_del_init(&sh->lru);
  3623. atomic_inc(&sh->count);
  3624. BUG_ON(atomic_read(&sh->count) != 1);
  3625. return sh;
  3626. }
  3627. struct raid5_plug_cb {
  3628. struct blk_plug_cb cb;
  3629. struct list_head list;
  3630. };
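/* Called on unplug: move the stripes collected on this plug's private
 * list back to the main lists under device_lock.
 */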
  3631. static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
  3632. {
  3633. struct raid5_plug_cb *cb = container_of(
  3634. blk_cb, struct raid5_plug_cb, cb);
  3635. struct stripe_head *sh;
  3636. struct mddev *mddev = cb->cb.data;
  3637. struct r5conf *conf = mddev->private;
  3638. int cnt = 0;
  3639. if (cb->list.next && !list_empty(&cb->list)) {
  3640. spin_lock_irq(&conf->device_lock);
  3641. while (!list_empty(&cb->list)) {
  3642. sh = list_first_entry(&cb->list, struct stripe_head, lru);
  3643. list_del_init(&sh->lru);
/*
* avoid a race where release_stripe_plug() sees
* STRIPE_ON_UNPLUG_LIST clear but the stripe
* is still in our list
*/
  3649. smp_mb__before_clear_bit();
  3650. clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
  3651. __release_stripe(conf, sh);
  3652. cnt++;
  3653. }
  3654. spin_unlock_irq(&conf->device_lock);
  3655. }
  3656. trace_block_unplug(mddev->queue, cnt, !from_schedule);
  3657. kfree(cb);
  3658. }
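/* Release a stripe through the current blk_plug when one is active, so
 * that several stripes can be batched and released together on unplug.
 */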
  3659. static void release_stripe_plug(struct mddev *mddev,
  3660. struct stripe_head *sh)
  3661. {
  3662. struct blk_plug_cb *blk_cb = blk_check_plugged(
  3663. raid5_unplug, mddev,
  3664. sizeof(struct raid5_plug_cb));
  3665. struct raid5_plug_cb *cb;
  3666. if (!blk_cb) {
  3667. release_stripe(sh);
  3668. return;
  3669. }
  3670. cb = container_of(blk_cb, struct raid5_plug_cb, cb);
  3671. if (cb->list.next == NULL)
  3672. INIT_LIST_HEAD(&cb->list);
  3673. if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
  3674. list_add_tail(&sh->lru, &cb->list);
  3675. else
  3676. release_stripe(sh);
  3677. }
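/* make_discard_request
 * Map a DISCARD bio onto whole stripes: the range is trimmed to full
 * stripe boundaries and the bio is attached as a 'towrite' overwrite to
 * every data disk of each stripe it covers.
 */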
  3678. static void make_discard_request(struct mddev *mddev, struct bio *bi)
  3679. {
  3680. struct r5conf *conf = mddev->private;
  3681. sector_t logical_sector, last_sector;
  3682. struct stripe_head *sh;
  3683. int remaining;
  3684. int stripe_sectors;
  3685. if (mddev->reshape_position != MaxSector)
  3686. /* Skip discard while reshape is happening */
  3687. return;
  3688. logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
  3689. last_sector = bi->bi_sector + (bi->bi_size>>9);
  3690. bi->bi_next = NULL;
  3691. bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
  3692. stripe_sectors = conf->chunk_sectors *
  3693. (conf->raid_disks - conf->max_degraded);
  3694. logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector,
  3695. stripe_sectors);
  3696. sector_div(last_sector, stripe_sectors);
  3697. logical_sector *= conf->chunk_sectors;
  3698. last_sector *= conf->chunk_sectors;
  3699. for (; logical_sector < last_sector;
  3700. logical_sector += STRIPE_SECTORS) {
  3701. DEFINE_WAIT(w);
  3702. int d;
  3703. again:
  3704. sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
  3705. prepare_to_wait(&conf->wait_for_overlap, &w,
  3706. TASK_UNINTERRUPTIBLE);
  3707. spin_lock_irq(&sh->stripe_lock);
  3708. for (d = 0; d < conf->raid_disks; d++) {
  3709. if (d == sh->pd_idx || d == sh->qd_idx)
  3710. continue;
  3711. if (sh->dev[d].towrite || sh->dev[d].toread) {
  3712. set_bit(R5_Overlap, &sh->dev[d].flags);
  3713. spin_unlock_irq(&sh->stripe_lock);
  3714. release_stripe(sh);
  3715. schedule();
  3716. goto again;
  3717. }
  3718. }
  3719. finish_wait(&conf->wait_for_overlap, &w);
  3720. for (d = 0; d < conf->raid_disks; d++) {
  3721. if (d == sh->pd_idx || d == sh->qd_idx)
  3722. continue;
  3723. sh->dev[d].towrite = bi;
  3724. set_bit(R5_OVERWRITE, &sh->dev[d].flags);
  3725. raid5_inc_bi_active_stripes(bi);
  3726. }
  3727. spin_unlock_irq(&sh->stripe_lock);
  3728. if (conf->mddev->bitmap) {
  3729. for (d = 0;
  3730. d < conf->raid_disks - conf->max_degraded;
  3731. d++)
  3732. bitmap_startwrite(mddev->bitmap,
  3733. sh->sector,
  3734. STRIPE_SECTORS,
  3735. 0);
  3736. sh->bm_seq = conf->seq_flush + 1;
  3737. set_bit(STRIPE_BIT_DELAY, &sh->state);
  3738. }
  3739. set_bit(STRIPE_HANDLE, &sh->state);
  3740. clear_bit(STRIPE_DELAYED, &sh->state);
  3741. if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
  3742. atomic_inc(&conf->preread_active_stripes);
  3743. release_stripe_plug(mddev, sh);
  3744. }
  3745. remaining = raid5_dec_bi_active_stripes(bi);
  3746. if (remaining == 0) {
  3747. md_write_end(mddev);
  3748. bio_endio(bi, 0);
  3749. }
  3750. }
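/* make_request
 * Main entry point for normal i/o: split the bio into STRIPE_SECTORS
 * sized pieces, attach each piece to the stripe that holds it (waiting
 * out overlaps, reshape and suspended ranges), and leave the rest to
 * handle_stripe.
 */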
  3751. static void make_request(struct mddev *mddev, struct bio * bi)
  3752. {
  3753. struct r5conf *conf = mddev->private;
  3754. int dd_idx;
  3755. sector_t new_sector;
  3756. sector_t logical_sector, last_sector;
  3757. struct stripe_head *sh;
  3758. const int rw = bio_data_dir(bi);
  3759. int remaining;
  3760. if (unlikely(bi->bi_rw & REQ_FLUSH)) {
  3761. md_flush_request(mddev, bi);
  3762. return;
  3763. }
  3764. md_write_start(mddev, bi);
  3765. if (rw == READ &&
  3766. mddev->reshape_position == MaxSector &&
  3767. chunk_aligned_read(mddev,bi))
  3768. return;
  3769. if (unlikely(bi->bi_rw & REQ_DISCARD)) {
  3770. make_discard_request(mddev, bi);
  3771. return;
  3772. }
  3773. logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
  3774. last_sector = bio_end_sector(bi);
  3775. bi->bi_next = NULL;
  3776. bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
  3777. for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
  3778. DEFINE_WAIT(w);
  3779. int previous;
  3780. retry:
  3781. previous = 0;
  3782. prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
  3783. if (unlikely(conf->reshape_progress != MaxSector)) {
  3784. /* spinlock is needed as reshape_progress may be
  3785. * 64bit on a 32bit platform, and so it might be
  3786. * possible to see a half-updated value
  3787. * Of course reshape_progress could change after
  3788. * the lock is dropped, so once we get a reference
  3789. * to the stripe that we think it is, we will have
  3790. * to check again.
  3791. */
  3792. spin_lock_irq(&conf->device_lock);
  3793. if (mddev->reshape_backwards
  3794. ? logical_sector < conf->reshape_progress
  3795. : logical_sector >= conf->reshape_progress) {
  3796. previous = 1;
  3797. } else {
  3798. if (mddev->reshape_backwards
  3799. ? logical_sector < conf->reshape_safe
  3800. : logical_sector >= conf->reshape_safe) {
  3801. spin_unlock_irq(&conf->device_lock);
  3802. schedule();
  3803. goto retry;
  3804. }
  3805. }
  3806. spin_unlock_irq(&conf->device_lock);
  3807. }
  3808. new_sector = raid5_compute_sector(conf, logical_sector,
  3809. previous,
  3810. &dd_idx, NULL);
  3811. pr_debug("raid456: make_request, sector %llu logical %llu\n",
  3812. (unsigned long long)new_sector,
  3813. (unsigned long long)logical_sector);
  3814. sh = get_active_stripe(conf, new_sector, previous,
  3815. (bi->bi_rw&RWA_MASK), 0);
  3816. if (sh) {
  3817. if (unlikely(previous)) {
  3818. /* expansion might have moved on while waiting for a
  3819. * stripe, so we must do the range check again.
  3820. * Expansion could still move past after this
  3821. * test, but as we are holding a reference to
  3822. * 'sh', we know that if that happens,
  3823. * STRIPE_EXPANDING will get set and the expansion
  3824. * won't proceed until we finish with the stripe.
  3825. */
  3826. int must_retry = 0;
  3827. spin_lock_irq(&conf->device_lock);
  3828. if (mddev->reshape_backwards
  3829. ? logical_sector >= conf->reshape_progress
  3830. : logical_sector < conf->reshape_progress)
  3831. /* mismatch, need to try again */
  3832. must_retry = 1;
  3833. spin_unlock_irq(&conf->device_lock);
  3834. if (must_retry) {
  3835. release_stripe(sh);
  3836. schedule();
  3837. goto retry;
  3838. }
  3839. }
  3840. if (rw == WRITE &&
  3841. logical_sector >= mddev->suspend_lo &&
  3842. logical_sector < mddev->suspend_hi) {
  3843. release_stripe(sh);
  3844. /* As the suspend_* range is controlled by
  3845. * userspace, we want an interruptible
  3846. * wait.
  3847. */
  3848. flush_signals(current);
  3849. prepare_to_wait(&conf->wait_for_overlap,
  3850. &w, TASK_INTERRUPTIBLE);
  3851. if (logical_sector >= mddev->suspend_lo &&
  3852. logical_sector < mddev->suspend_hi)
  3853. schedule();
  3854. goto retry;
  3855. }
  3856. if (test_bit(STRIPE_EXPANDING, &sh->state) ||
  3857. !add_stripe_bio(sh, bi, dd_idx, rw)) {
  3858. /* Stripe is busy expanding or
  3859. * add failed due to overlap. Flush everything
  3860. * and wait a while
  3861. */
  3862. md_wakeup_thread(mddev->thread);
  3863. release_stripe(sh);
  3864. schedule();
  3865. goto retry;
  3866. }
  3867. finish_wait(&conf->wait_for_overlap, &w);
  3868. set_bit(STRIPE_HANDLE, &sh->state);
  3869. clear_bit(STRIPE_DELAYED, &sh->state);
  3870. if ((bi->bi_rw & REQ_SYNC) &&
  3871. !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
  3872. atomic_inc(&conf->preread_active_stripes);
  3873. release_stripe_plug(mddev, sh);
  3874. } else {
/* cannot get stripe for read-ahead, just give up */
  3876. clear_bit(BIO_UPTODATE, &bi->bi_flags);
  3877. finish_wait(&conf->wait_for_overlap, &w);
  3878. break;
  3879. }
  3880. }
  3881. remaining = raid5_dec_bi_active_stripes(bi);
  3882. if (remaining == 0) {
  3883. if ( rw == WRITE )
  3884. md_write_end(mddev);
  3885. bio_endio(bi, 0);
  3886. }
  3887. }
  3888. static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
  3889. static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
  3890. {
/* reshaping is quite different from recovery/resync, so it is
* handled quite separately ... here.
  3893. *
  3894. * On each call to sync_request, we gather one chunk worth of
  3895. * destination stripes and flag them as expanding.
  3896. * Then we find all the source stripes and request reads.
  3897. * As the reads complete, handle_stripe will copy the data
  3898. * into the destination stripe and release that stripe.
  3899. */
  3900. struct r5conf *conf = mddev->private;
  3901. struct stripe_head *sh;
  3902. sector_t first_sector, last_sector;
  3903. int raid_disks = conf->previous_raid_disks;
  3904. int data_disks = raid_disks - conf->max_degraded;
  3905. int new_data_disks = conf->raid_disks - conf->max_degraded;
  3906. int i;
  3907. int dd_idx;
  3908. sector_t writepos, readpos, safepos;
  3909. sector_t stripe_addr;
  3910. int reshape_sectors;
  3911. struct list_head stripes;
  3912. if (sector_nr == 0) {
  3913. /* If restarting in the middle, skip the initial sectors */
  3914. if (mddev->reshape_backwards &&
  3915. conf->reshape_progress < raid5_size(mddev, 0, 0)) {
  3916. sector_nr = raid5_size(mddev, 0, 0)
  3917. - conf->reshape_progress;
  3918. } else if (!mddev->reshape_backwards &&
  3919. conf->reshape_progress > 0)
  3920. sector_nr = conf->reshape_progress;
  3921. sector_div(sector_nr, new_data_disks);
  3922. if (sector_nr) {
  3923. mddev->curr_resync_completed = sector_nr;
  3924. sysfs_notify(&mddev->kobj, NULL, "sync_completed");
  3925. *skipped = 1;
  3926. return sector_nr;
  3927. }
  3928. }
  3929. /* We need to process a full chunk at a time.
  3930. * If old and new chunk sizes differ, we need to process the
  3931. * largest of these
  3932. */
  3933. if (mddev->new_chunk_sectors > mddev->chunk_sectors)
  3934. reshape_sectors = mddev->new_chunk_sectors;
  3935. else
  3936. reshape_sectors = mddev->chunk_sectors;
  3937. /* We update the metadata at least every 10 seconds, or when
  3938. * the data about to be copied would over-write the source of
  3939. * the data at the front of the range. i.e. one new_stripe
3940. beyond reshape_progress, mapped through the new layout, lands after
3941. the point that reshape_safe maps to through the old layout
  3942. */
  3943. writepos = conf->reshape_progress;
  3944. sector_div(writepos, new_data_disks);
  3945. readpos = conf->reshape_progress;
  3946. sector_div(readpos, data_disks);
  3947. safepos = conf->reshape_safe;
  3948. sector_div(safepos, data_disks);
  3949. if (mddev->reshape_backwards) {
  3950. writepos -= min_t(sector_t, reshape_sectors, writepos);
  3951. readpos += reshape_sectors;
  3952. safepos += reshape_sectors;
  3953. } else {
  3954. writepos += reshape_sectors;
  3955. readpos -= min_t(sector_t, reshape_sectors, readpos);
  3956. safepos -= min_t(sector_t, reshape_sectors, safepos);
  3957. }
  3958. /* Having calculated the 'writepos' possibly use it
  3959. * to set 'stripe_addr' which is where we will write to.
  3960. */
  3961. if (mddev->reshape_backwards) {
  3962. BUG_ON(conf->reshape_progress == 0);
  3963. stripe_addr = writepos;
  3964. BUG_ON((mddev->dev_sectors &
  3965. ~((sector_t)reshape_sectors - 1))
  3966. - reshape_sectors - stripe_addr
  3967. != sector_nr);
  3968. } else {
  3969. BUG_ON(writepos != sector_nr + reshape_sectors);
  3970. stripe_addr = sector_nr;
  3971. }
  3972. /* 'writepos' is the most advanced device address we might write.
  3973. * 'readpos' is the least advanced device address we might read.
  3974. * 'safepos' is the least address recorded in the metadata as having
  3975. * been reshaped.
  3976. * If there is a min_offset_diff, these are adjusted either by
  3977. * increasing the safepos/readpos if diff is negative, or
  3978. * increasing writepos if diff is positive.
  3979. * If 'readpos' is then behind 'writepos', there is no way that we can
  3980. * ensure safety in the face of a crash - that must be done by userspace
  3981. * making a backup of the data. So in that case there is no particular
  3982. * rush to update metadata.
  3983. * Otherwise if 'safepos' is behind 'writepos', then we really need to
  3984. * update the metadata to advance 'safepos' to match 'readpos' so that
  3985. * we can be safe in the event of a crash.
  3986. * So we insist on updating metadata if safepos is behind writepos and
  3987. * readpos is beyond writepos.
  3988. * In any case, update the metadata every 10 seconds.
  3989. * Maybe that number should be configurable, but I'm not sure it is
  3990. * worth it.... maybe it could be a multiple of safemode_delay???
  3991. */
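/* A hypothetical worked example of the update condition a few lines below
* (illustrative numbers only, assuming min_offset_diff == 0): growing
* forwards from 4 to 5 data disks with reshape_sectors = 1024,
* reshape_progress = 102400 and reshape_safe = 81920 gives
*   writepos = 102400/5 + 1024 = 21504
*   readpos  = 102400/4 - 1024 = 24576
*   safepos  =  81920/4 - 1024 = 19456
* so safepos < writepos and readpos > writepos, and the metadata is
* updated before any more stripes are scheduled.
*/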
  3992. if (conf->min_offset_diff < 0) {
  3993. safepos += -conf->min_offset_diff;
  3994. readpos += -conf->min_offset_diff;
  3995. } else
  3996. writepos += conf->min_offset_diff;
  3997. if ((mddev->reshape_backwards
  3998. ? (safepos > writepos && readpos < writepos)
  3999. : (safepos < writepos && readpos > writepos)) ||
  4000. time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
  4001. /* Cannot proceed until we've updated the superblock... */
  4002. wait_event(conf->wait_for_overlap,
  4003. atomic_read(&conf->reshape_stripes)==0);
  4004. mddev->reshape_position = conf->reshape_progress;
  4005. mddev->curr_resync_completed = sector_nr;
  4006. conf->reshape_checkpoint = jiffies;
  4007. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  4008. md_wakeup_thread(mddev->thread);
  4009. wait_event(mddev->sb_wait, mddev->flags == 0 ||
  4010. kthread_should_stop());
  4011. spin_lock_irq(&conf->device_lock);
  4012. conf->reshape_safe = mddev->reshape_position;
  4013. spin_unlock_irq(&conf->device_lock);
  4014. wake_up(&conf->wait_for_overlap);
  4015. sysfs_notify(&mddev->kobj, NULL, "sync_completed");
  4016. }
  4017. INIT_LIST_HEAD(&stripes);
  4018. for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
  4019. int j;
  4020. int skipped_disk = 0;
  4021. sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
  4022. set_bit(STRIPE_EXPANDING, &sh->state);
  4023. atomic_inc(&conf->reshape_stripes);
  4024. /* If any of this stripe is beyond the end of the old
  4025. * array, then we need to zero those blocks
  4026. */
  4027. for (j=sh->disks; j--;) {
  4028. sector_t s;
  4029. if (j == sh->pd_idx)
  4030. continue;
  4031. if (conf->level == 6 &&
  4032. j == sh->qd_idx)
  4033. continue;
  4034. s = compute_blocknr(sh, j, 0);
  4035. if (s < raid5_size(mddev, 0, 0)) {
  4036. skipped_disk = 1;
  4037. continue;
  4038. }
  4039. memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
  4040. set_bit(R5_Expanded, &sh->dev[j].flags);
  4041. set_bit(R5_UPTODATE, &sh->dev[j].flags);
  4042. }
  4043. if (!skipped_disk) {
  4044. set_bit(STRIPE_EXPAND_READY, &sh->state);
  4045. set_bit(STRIPE_HANDLE, &sh->state);
  4046. }
  4047. list_add(&sh->lru, &stripes);
  4048. }
  4049. spin_lock_irq(&conf->device_lock);
  4050. if (mddev->reshape_backwards)
  4051. conf->reshape_progress -= reshape_sectors * new_data_disks;
  4052. else
  4053. conf->reshape_progress += reshape_sectors * new_data_disks;
  4054. spin_unlock_irq(&conf->device_lock);
4055. /* Ok, those stripes are ready. We can start scheduling
  4056. * reads on the source stripes.
  4057. * The source stripes are determined by mapping the first and last
  4058. * block on the destination stripes.
  4059. */
  4060. first_sector =
  4061. raid5_compute_sector(conf, stripe_addr*(new_data_disks),
  4062. 1, &dd_idx, NULL);
  4063. last_sector =
  4064. raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
  4065. * new_data_disks - 1),
  4066. 1, &dd_idx, NULL);
  4067. if (last_sector >= mddev->dev_sectors)
  4068. last_sector = mddev->dev_sectors - 1;
  4069. while (first_sector <= last_sector) {
  4070. sh = get_active_stripe(conf, first_sector, 1, 0, 1);
  4071. set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
  4072. set_bit(STRIPE_HANDLE, &sh->state);
  4073. release_stripe(sh);
  4074. first_sector += STRIPE_SECTORS;
  4075. }
  4076. /* Now that the sources are clearly marked, we can release
  4077. * the destination stripes
  4078. */
  4079. while (!list_empty(&stripes)) {
  4080. sh = list_entry(stripes.next, struct stripe_head, lru);
  4081. list_del_init(&sh->lru);
  4082. release_stripe(sh);
  4083. }
  4084. /* If this takes us to the resync_max point where we have to pause,
  4085. * then we need to write out the superblock.
  4086. */
  4087. sector_nr += reshape_sectors;
  4088. if ((sector_nr - mddev->curr_resync_completed) * 2
  4089. >= mddev->resync_max - mddev->curr_resync_completed) {
  4090. /* Cannot proceed until we've updated the superblock... */
  4091. wait_event(conf->wait_for_overlap,
  4092. atomic_read(&conf->reshape_stripes) == 0);
  4093. mddev->reshape_position = conf->reshape_progress;
  4094. mddev->curr_resync_completed = sector_nr;
  4095. conf->reshape_checkpoint = jiffies;
  4096. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  4097. md_wakeup_thread(mddev->thread);
  4098. wait_event(mddev->sb_wait,
  4099. !test_bit(MD_CHANGE_DEVS, &mddev->flags)
  4100. || kthread_should_stop());
  4101. spin_lock_irq(&conf->device_lock);
  4102. conf->reshape_safe = mddev->reshape_position;
  4103. spin_unlock_irq(&conf->device_lock);
  4104. wake_up(&conf->wait_for_overlap);
  4105. sysfs_notify(&mddev->kobj, NULL, "sync_completed");
  4106. }
  4107. return reshape_sectors;
  4108. }
  4109. /* FIXME go_faster isn't used */
  4110. static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
  4111. {
  4112. struct r5conf *conf = mddev->private;
  4113. struct stripe_head *sh;
  4114. sector_t max_sector = mddev->dev_sectors;
  4115. sector_t sync_blocks;
  4116. int still_degraded = 0;
  4117. int i;
  4118. if (sector_nr >= max_sector) {
  4119. /* just being told to finish up .. nothing much to do */
  4120. if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
  4121. end_reshape(conf);
  4122. return 0;
  4123. }
  4124. if (mddev->curr_resync < max_sector) /* aborted */
  4125. bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
  4126. &sync_blocks, 1);
  4127. else /* completed sync */
  4128. conf->fullsync = 0;
  4129. bitmap_close_sync(mddev->bitmap);
  4130. return 0;
  4131. }
  4132. /* Allow raid5_quiesce to complete */
  4133. wait_event(conf->wait_for_overlap, conf->quiesce != 2);
  4134. if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
  4135. return reshape_request(mddev, sector_nr, skipped);
  4136. /* No need to check resync_max as we never do more than one
  4137. * stripe, and as resync_max will always be on a chunk boundary,
  4138. * if the check in md_do_sync didn't fire, there is no chance
  4139. * of overstepping resync_max here
  4140. */
4141. /* if there are too many failed drives and we are trying
  4142. * to resync, then assert that we are finished, because there is
  4143. * nothing we can do.
  4144. */
  4145. if (mddev->degraded >= conf->max_degraded &&
  4146. test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
  4147. sector_t rv = mddev->dev_sectors - sector_nr;
  4148. *skipped = 1;
  4149. return rv;
  4150. }
  4151. if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
  4152. !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
  4153. !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
  4154. /* we can skip this block, and probably more */
  4155. sync_blocks /= STRIPE_SECTORS;
  4156. *skipped = 1;
  4157. return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
  4158. }
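/* Illustrative example (hypothetical count): with 4KiB pages STRIPE_SECTORS
* is 8, so a sync_blocks of 1003 is rounded down to 125 whole stripes and
* 1000 sectors are skipped in one step.
*/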
  4159. bitmap_cond_end_sync(mddev->bitmap, sector_nr);
  4160. sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
  4161. if (sh == NULL) {
  4162. sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
  4163. /* make sure we don't swamp the stripe cache if someone else
  4164. * is trying to get access
  4165. */
  4166. schedule_timeout_uninterruptible(1);
  4167. }
  4168. /* Need to check if array will still be degraded after recovery/resync
  4169. * We don't need to check the 'failed' flag as when that gets set,
  4170. * recovery aborts.
  4171. */
  4172. for (i = 0; i < conf->raid_disks; i++)
  4173. if (conf->disks[i].rdev == NULL)
  4174. still_degraded = 1;
  4175. bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
  4176. set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
  4177. handle_stripe(sh);
  4178. release_stripe(sh);
  4179. return STRIPE_SECTORS;
  4180. }
  4181. static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
  4182. {
  4183. /* We may not be able to submit a whole bio at once as there
  4184. * may not be enough stripe_heads available.
  4185. * We cannot pre-allocate enough stripe_heads as we may need
4186. * more than exist in the cache (if we ever allow larger chunks).
4187. * So we do one stripe head at a time and record, via
4188. * raid5_set_bi_processed_stripes(), how many have been done.
  4189. *
  4190. * We *know* that this entire raid_bio is in one chunk, so
4191. * it will use only one 'dd_idx' and need only one call to raid5_compute_sector.
  4192. */
  4193. struct stripe_head *sh;
  4194. int dd_idx;
  4195. sector_t sector, logical_sector, last_sector;
  4196. int scnt = 0;
  4197. int remaining;
  4198. int handled = 0;
  4199. logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
  4200. sector = raid5_compute_sector(conf, logical_sector,
  4201. 0, &dd_idx, NULL);
  4202. last_sector = bio_end_sector(raid_bio);
  4203. for (; logical_sector < last_sector;
  4204. logical_sector += STRIPE_SECTORS,
  4205. sector += STRIPE_SECTORS,
  4206. scnt++) {
  4207. if (scnt < raid5_bi_processed_stripes(raid_bio))
  4208. /* already done this stripe */
  4209. continue;
  4210. sh = get_active_stripe(conf, sector, 0, 1, 0);
  4211. if (!sh) {
  4212. /* failed to get a stripe - must wait */
  4213. raid5_set_bi_processed_stripes(raid_bio, scnt);
  4214. conf->retry_read_aligned = raid_bio;
  4215. return handled;
  4216. }
  4217. if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
  4218. release_stripe(sh);
  4219. raid5_set_bi_processed_stripes(raid_bio, scnt);
  4220. conf->retry_read_aligned = raid_bio;
  4221. return handled;
  4222. }
  4223. set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
  4224. handle_stripe(sh);
  4225. release_stripe(sh);
  4226. handled++;
  4227. }
  4228. remaining = raid5_dec_bi_active_stripes(raid_bio);
  4229. if (remaining == 0)
  4230. bio_endio(raid_bio, 0);
  4231. if (atomic_dec_and_test(&conf->active_aligned_reads))
  4232. wake_up(&conf->wait_for_stripe);
  4233. return handled;
  4234. }
  4235. #define MAX_STRIPE_BATCH 8
  4236. static int handle_active_stripes(struct r5conf *conf)
  4237. {
  4238. struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
  4239. int i, batch_size = 0;
  4240. while (batch_size < MAX_STRIPE_BATCH &&
  4241. (sh = __get_priority_stripe(conf)) != NULL)
  4242. batch[batch_size++] = sh;
  4243. if (batch_size == 0)
  4244. return batch_size;
  4245. spin_unlock_irq(&conf->device_lock);
  4246. for (i = 0; i < batch_size; i++)
  4247. handle_stripe(batch[i]);
  4248. cond_resched();
  4249. spin_lock_irq(&conf->device_lock);
  4250. for (i = 0; i < batch_size; i++)
  4251. __release_stripe(conf, batch[i]);
  4252. return batch_size;
  4253. }
  4254. /*
  4255. * This is our raid5 kernel thread.
  4256. *
  4257. * We scan the hash table for stripes which can be handled now.
  4258. * During the scan, completed stripes are saved for us by the interrupt
  4259. * handler, so that they will not have to wait for our next wakeup.
  4260. */
  4261. static void raid5d(struct md_thread *thread)
  4262. {
  4263. struct mddev *mddev = thread->mddev;
  4264. struct r5conf *conf = mddev->private;
  4265. int handled;
  4266. struct blk_plug plug;
  4267. pr_debug("+++ raid5d active\n");
  4268. md_check_recovery(mddev);
  4269. blk_start_plug(&plug);
  4270. handled = 0;
  4271. spin_lock_irq(&conf->device_lock);
  4272. while (1) {
  4273. struct bio *bio;
  4274. int batch_size;
4275. if (!list_empty(&conf->bitmap_list)) {
  4277. /* Now is a good time to flush some bitmap updates */
  4278. conf->seq_flush++;
  4279. spin_unlock_irq(&conf->device_lock);
  4280. bitmap_unplug(mddev->bitmap);
  4281. spin_lock_irq(&conf->device_lock);
  4282. conf->seq_write = conf->seq_flush;
  4283. activate_bit_delay(conf);
  4284. }
  4285. raid5_activate_delayed(conf);
  4286. while ((bio = remove_bio_from_retry(conf))) {
  4287. int ok;
  4288. spin_unlock_irq(&conf->device_lock);
  4289. ok = retry_aligned_read(conf, bio);
  4290. spin_lock_irq(&conf->device_lock);
  4291. if (!ok)
  4292. break;
  4293. handled++;
  4294. }
  4295. batch_size = handle_active_stripes(conf);
  4296. if (!batch_size)
  4297. break;
  4298. handled += batch_size;
  4299. if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
  4300. spin_unlock_irq(&conf->device_lock);
  4301. md_check_recovery(mddev);
  4302. spin_lock_irq(&conf->device_lock);
  4303. }
  4304. }
  4305. pr_debug("%d stripes handled\n", handled);
  4306. spin_unlock_irq(&conf->device_lock);
  4307. async_tx_issue_pending_all();
  4308. blk_finish_plug(&plug);
  4309. pr_debug("--- raid5d inactive\n");
  4310. }
  4311. static ssize_t
  4312. raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
  4313. {
  4314. struct r5conf *conf = mddev->private;
  4315. if (conf)
  4316. return sprintf(page, "%d\n", conf->max_nr_stripes);
  4317. else
  4318. return 0;
  4319. }
  4320. int
  4321. raid5_set_cache_size(struct mddev *mddev, int size)
  4322. {
  4323. struct r5conf *conf = mddev->private;
  4324. int err;
  4325. if (size <= 16 || size > 32768)
  4326. return -EINVAL;
  4327. while (size < conf->max_nr_stripes) {
  4328. if (drop_one_stripe(conf))
  4329. conf->max_nr_stripes--;
  4330. else
  4331. break;
  4332. }
  4333. err = md_allow_write(mddev);
  4334. if (err)
  4335. return err;
  4336. while (size > conf->max_nr_stripes) {
  4337. if (grow_one_stripe(conf))
  4338. conf->max_nr_stripes++;
  4339. else break;
  4340. }
  4341. return 0;
  4342. }
  4343. EXPORT_SYMBOL(raid5_set_cache_size);
  4344. static ssize_t
  4345. raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
  4346. {
  4347. struct r5conf *conf = mddev->private;
  4348. unsigned long new;
  4349. int err;
  4350. if (len >= PAGE_SIZE)
  4351. return -EINVAL;
  4352. if (!conf)
  4353. return -ENODEV;
  4354. if (strict_strtoul(page, 10, &new))
  4355. return -EINVAL;
  4356. err = raid5_set_cache_size(mddev, new);
  4357. if (err)
  4358. return err;
  4359. return len;
  4360. }
  4361. static struct md_sysfs_entry
  4362. raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
  4363. raid5_show_stripe_cache_size,
  4364. raid5_store_stripe_cache_size);
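/* Usage sketch (typical sysfs path, may differ by setup): the attribute
* defined above normally appears as
*   /sys/block/mdX/md/stripe_cache_size
* so e.g. "echo 4096 > /sys/block/md0/md/stripe_cache_size" grows the cache,
* subject to the 17..32768 bounds enforced by raid5_set_cache_size() above.
*/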
  4365. static ssize_t
  4366. raid5_show_preread_threshold(struct mddev *mddev, char *page)
  4367. {
  4368. struct r5conf *conf = mddev->private;
  4369. if (conf)
  4370. return sprintf(page, "%d\n", conf->bypass_threshold);
  4371. else
  4372. return 0;
  4373. }
  4374. static ssize_t
  4375. raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
  4376. {
  4377. struct r5conf *conf = mddev->private;
  4378. unsigned long new;
  4379. if (len >= PAGE_SIZE)
  4380. return -EINVAL;
  4381. if (!conf)
  4382. return -ENODEV;
  4383. if (strict_strtoul(page, 10, &new))
  4384. return -EINVAL;
  4385. if (new > conf->max_nr_stripes)
  4386. return -EINVAL;
  4387. conf->bypass_threshold = new;
  4388. return len;
  4389. }
  4390. static struct md_sysfs_entry
  4391. raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
  4392. S_IRUGO | S_IWUSR,
  4393. raid5_show_preread_threshold,
  4394. raid5_store_preread_threshold);
  4395. static ssize_t
  4396. stripe_cache_active_show(struct mddev *mddev, char *page)
  4397. {
  4398. struct r5conf *conf = mddev->private;
  4399. if (conf)
  4400. return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
  4401. else
  4402. return 0;
  4403. }
  4404. static struct md_sysfs_entry
  4405. raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
  4406. static struct attribute *raid5_attrs[] = {
  4407. &raid5_stripecache_size.attr,
  4408. &raid5_stripecache_active.attr,
  4409. &raid5_preread_bypass_threshold.attr,
  4410. NULL,
  4411. };
  4412. static struct attribute_group raid5_attrs_group = {
  4413. .name = NULL,
  4414. .attrs = raid5_attrs,
  4415. };
  4416. static sector_t
  4417. raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
  4418. {
  4419. struct r5conf *conf = mddev->private;
  4420. if (!sectors)
  4421. sectors = mddev->dev_sectors;
  4422. if (!raid_disks)
4423. /* size is defined by the smaller of the previous and new disk counts */
  4424. raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
  4425. sectors &= ~((sector_t)mddev->chunk_sectors - 1);
  4426. sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
  4427. return sectors * (raid_disks - conf->max_degraded);
  4428. }
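/* Hypothetical example of the calculation above: a 6-device RAID6
* (max_degraded = 2) with dev_sectors = 1,000,000 and 1024-sector (512KiB)
* chunks rounds sectors down to 999,424, so the reported array size is
* 999,424 * (6 - 2) = 3,997,696 sectors.
*/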
  4429. static void raid5_free_percpu(struct r5conf *conf)
  4430. {
  4431. struct raid5_percpu *percpu;
  4432. unsigned long cpu;
  4433. if (!conf->percpu)
  4434. return;
  4435. get_online_cpus();
  4436. for_each_possible_cpu(cpu) {
  4437. percpu = per_cpu_ptr(conf->percpu, cpu);
  4438. safe_put_page(percpu->spare_page);
  4439. kfree(percpu->scribble);
  4440. }
  4441. #ifdef CONFIG_HOTPLUG_CPU
  4442. unregister_cpu_notifier(&conf->cpu_notify);
  4443. #endif
  4444. put_online_cpus();
  4445. free_percpu(conf->percpu);
  4446. }
  4447. static void free_conf(struct r5conf *conf)
  4448. {
  4449. shrink_stripes(conf);
  4450. raid5_free_percpu(conf);
  4451. kfree(conf->disks);
  4452. kfree(conf->stripe_hashtbl);
  4453. kfree(conf);
  4454. }
  4455. #ifdef CONFIG_HOTPLUG_CPU
  4456. static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
  4457. void *hcpu)
  4458. {
  4459. struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
  4460. long cpu = (long)hcpu;
  4461. struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
  4462. switch (action) {
  4463. case CPU_UP_PREPARE:
  4464. case CPU_UP_PREPARE_FROZEN:
  4465. if (conf->level == 6 && !percpu->spare_page)
  4466. percpu->spare_page = alloc_page(GFP_KERNEL);
  4467. if (!percpu->scribble)
  4468. percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
  4469. if (!percpu->scribble ||
  4470. (conf->level == 6 && !percpu->spare_page)) {
  4471. safe_put_page(percpu->spare_page);
  4472. kfree(percpu->scribble);
  4473. pr_err("%s: failed memory allocation for cpu%ld\n",
  4474. __func__, cpu);
  4475. return notifier_from_errno(-ENOMEM);
  4476. }
  4477. break;
  4478. case CPU_DEAD:
  4479. case CPU_DEAD_FROZEN:
  4480. safe_put_page(percpu->spare_page);
  4481. kfree(percpu->scribble);
  4482. percpu->spare_page = NULL;
  4483. percpu->scribble = NULL;
  4484. break;
  4485. default:
  4486. break;
  4487. }
  4488. return NOTIFY_OK;
  4489. }
  4490. #endif
  4491. static int raid5_alloc_percpu(struct r5conf *conf)
  4492. {
  4493. unsigned long cpu;
  4494. struct page *spare_page;
  4495. struct raid5_percpu __percpu *allcpus;
  4496. void *scribble;
  4497. int err;
  4498. allcpus = alloc_percpu(struct raid5_percpu);
  4499. if (!allcpus)
  4500. return -ENOMEM;
  4501. conf->percpu = allcpus;
  4502. get_online_cpus();
  4503. err = 0;
  4504. for_each_present_cpu(cpu) {
  4505. if (conf->level == 6) {
  4506. spare_page = alloc_page(GFP_KERNEL);
  4507. if (!spare_page) {
  4508. err = -ENOMEM;
  4509. break;
  4510. }
  4511. per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
  4512. }
  4513. scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
  4514. if (!scribble) {
  4515. err = -ENOMEM;
  4516. break;
  4517. }
  4518. per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
  4519. }
  4520. #ifdef CONFIG_HOTPLUG_CPU
  4521. conf->cpu_notify.notifier_call = raid456_cpu_notify;
  4522. conf->cpu_notify.priority = 0;
  4523. if (err == 0)
  4524. err = register_cpu_notifier(&conf->cpu_notify);
  4525. #endif
  4526. put_online_cpus();
  4527. return err;
  4528. }
  4529. static struct r5conf *setup_conf(struct mddev *mddev)
  4530. {
  4531. struct r5conf *conf;
  4532. int raid_disk, memory, max_disks;
  4533. struct md_rdev *rdev;
  4534. struct disk_info *disk;
  4535. char pers_name[6];
  4536. if (mddev->new_level != 5
  4537. && mddev->new_level != 4
  4538. && mddev->new_level != 6) {
  4539. printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
  4540. mdname(mddev), mddev->new_level);
  4541. return ERR_PTR(-EIO);
  4542. }
  4543. if ((mddev->new_level == 5
  4544. && !algorithm_valid_raid5(mddev->new_layout)) ||
  4545. (mddev->new_level == 6
  4546. && !algorithm_valid_raid6(mddev->new_layout))) {
  4547. printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
  4548. mdname(mddev), mddev->new_layout);
  4549. return ERR_PTR(-EIO);
  4550. }
  4551. if (mddev->new_level == 6 && mddev->raid_disks < 4) {
  4552. printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
  4553. mdname(mddev), mddev->raid_disks);
  4554. return ERR_PTR(-EINVAL);
  4555. }
  4556. if (!mddev->new_chunk_sectors ||
  4557. (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
  4558. !is_power_of_2(mddev->new_chunk_sectors)) {
  4559. printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
  4560. mdname(mddev), mddev->new_chunk_sectors << 9);
  4561. return ERR_PTR(-EINVAL);
  4562. }
  4563. conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
  4564. if (conf == NULL)
  4565. goto abort;
  4566. spin_lock_init(&conf->device_lock);
  4567. init_waitqueue_head(&conf->wait_for_stripe);
  4568. init_waitqueue_head(&conf->wait_for_overlap);
  4569. INIT_LIST_HEAD(&conf->handle_list);
  4570. INIT_LIST_HEAD(&conf->hold_list);
  4571. INIT_LIST_HEAD(&conf->delayed_list);
  4572. INIT_LIST_HEAD(&conf->bitmap_list);
  4573. INIT_LIST_HEAD(&conf->inactive_list);
  4574. atomic_set(&conf->active_stripes, 0);
  4575. atomic_set(&conf->preread_active_stripes, 0);
  4576. atomic_set(&conf->active_aligned_reads, 0);
  4577. conf->bypass_threshold = BYPASS_THRESHOLD;
  4578. conf->recovery_disabled = mddev->recovery_disabled - 1;
  4579. conf->raid_disks = mddev->raid_disks;
  4580. if (mddev->reshape_position == MaxSector)
  4581. conf->previous_raid_disks = mddev->raid_disks;
  4582. else
  4583. conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
  4584. max_disks = max(conf->raid_disks, conf->previous_raid_disks);
  4585. conf->scribble_len = scribble_len(max_disks);
  4586. conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
  4587. GFP_KERNEL);
  4588. if (!conf->disks)
  4589. goto abort;
  4590. conf->mddev = mddev;
  4591. if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
  4592. goto abort;
  4593. conf->level = mddev->new_level;
  4594. if (raid5_alloc_percpu(conf) != 0)
  4595. goto abort;
  4596. pr_debug("raid456: run(%s) called.\n", mdname(mddev));
  4597. rdev_for_each(rdev, mddev) {
  4598. raid_disk = rdev->raid_disk;
  4599. if (raid_disk >= max_disks
  4600. || raid_disk < 0)
  4601. continue;
  4602. disk = conf->disks + raid_disk;
  4603. if (test_bit(Replacement, &rdev->flags)) {
  4604. if (disk->replacement)
  4605. goto abort;
  4606. disk->replacement = rdev;
  4607. } else {
  4608. if (disk->rdev)
  4609. goto abort;
  4610. disk->rdev = rdev;
  4611. }
  4612. if (test_bit(In_sync, &rdev->flags)) {
  4613. char b[BDEVNAME_SIZE];
  4614. printk(KERN_INFO "md/raid:%s: device %s operational as raid"
  4615. " disk %d\n",
  4616. mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
  4617. } else if (rdev->saved_raid_disk != raid_disk)
  4618. /* Cannot rely on bitmap to complete recovery */
  4619. conf->fullsync = 1;
  4620. }
  4621. conf->chunk_sectors = mddev->new_chunk_sectors;
  4622. conf->level = mddev->new_level;
  4623. if (conf->level == 6)
  4624. conf->max_degraded = 2;
  4625. else
  4626. conf->max_degraded = 1;
  4627. conf->algorithm = mddev->new_layout;
  4628. conf->max_nr_stripes = NR_STRIPES;
  4629. conf->reshape_progress = mddev->reshape_position;
  4630. if (conf->reshape_progress != MaxSector) {
  4631. conf->prev_chunk_sectors = mddev->chunk_sectors;
  4632. conf->prev_algo = mddev->layout;
  4633. }
  4634. memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
  4635. max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
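/* Rough illustration (hypothetical numbers): with the default 256 stripes,
* 8 devices and 4KiB pages this is about 256 * 8 * 4KiB = 8MiB of buffer
* pages, plus the stripe_head and bio bookkeeping on top.
*/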
  4636. if (grow_stripes(conf, conf->max_nr_stripes)) {
  4637. printk(KERN_ERR
  4638. "md/raid:%s: couldn't allocate %dkB for buffers\n",
  4639. mdname(mddev), memory);
  4640. goto abort;
  4641. } else
  4642. printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
  4643. mdname(mddev), memory);
  4644. sprintf(pers_name, "raid%d", mddev->new_level);
  4645. conf->thread = md_register_thread(raid5d, mddev, pers_name);
  4646. if (!conf->thread) {
  4647. printk(KERN_ERR
  4648. "md/raid:%s: couldn't allocate thread.\n",
  4649. mdname(mddev));
  4650. goto abort;
  4651. }
  4652. return conf;
  4653. abort:
  4654. if (conf) {
  4655. free_conf(conf);
  4656. return ERR_PTR(-EIO);
  4657. } else
  4658. return ERR_PTR(-ENOMEM);
  4659. }
  4660. static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
  4661. {
  4662. switch (algo) {
  4663. case ALGORITHM_PARITY_0:
  4664. if (raid_disk < max_degraded)
  4665. return 1;
  4666. break;
  4667. case ALGORITHM_PARITY_N:
  4668. if (raid_disk >= raid_disks - max_degraded)
  4669. return 1;
  4670. break;
  4671. case ALGORITHM_PARITY_0_6:
  4672. if (raid_disk == 0 ||
  4673. raid_disk == raid_disks - 1)
  4674. return 1;
  4675. break;
  4676. case ALGORITHM_LEFT_ASYMMETRIC_6:
  4677. case ALGORITHM_RIGHT_ASYMMETRIC_6:
  4678. case ALGORITHM_LEFT_SYMMETRIC_6:
  4679. case ALGORITHM_RIGHT_SYMMETRIC_6:
  4680. if (raid_disk == raid_disks - 1)
  4681. return 1;
  4682. }
  4683. return 0;
  4684. }
  4685. static int run(struct mddev *mddev)
  4686. {
  4687. struct r5conf *conf;
  4688. int working_disks = 0;
  4689. int dirty_parity_disks = 0;
  4690. struct md_rdev *rdev;
  4691. sector_t reshape_offset = 0;
  4692. int i;
  4693. long long min_offset_diff = 0;
  4694. int first = 1;
  4695. if (mddev->recovery_cp != MaxSector)
  4696. printk(KERN_NOTICE "md/raid:%s: not clean"
  4697. " -- starting background reconstruction\n",
  4698. mdname(mddev));
  4699. rdev_for_each(rdev, mddev) {
  4700. long long diff;
  4701. if (rdev->raid_disk < 0)
  4702. continue;
  4703. diff = (rdev->new_data_offset - rdev->data_offset);
  4704. if (first) {
  4705. min_offset_diff = diff;
  4706. first = 0;
  4707. } else if (mddev->reshape_backwards &&
  4708. diff < min_offset_diff)
  4709. min_offset_diff = diff;
  4710. else if (!mddev->reshape_backwards &&
  4711. diff > min_offset_diff)
  4712. min_offset_diff = diff;
  4713. }
  4714. if (mddev->reshape_position != MaxSector) {
  4715. /* Check that we can continue the reshape.
  4716. * Difficulties arise if the stripe we would write to
  4717. * next is at or after the stripe we would read from next.
  4718. * For a reshape that changes the number of devices, this
  4719. * is only possible for a very short time, and mdadm makes
4720. sure that time appears to have passed before assembling
  4721. * the array. So we fail if that time hasn't passed.
  4722. * For a reshape that keeps the number of devices the same
4723. mdadm must be monitoring the reshape and keeping the
  4724. * critical areas read-only and backed up. It will start
  4725. * the array in read-only mode, so we check for that.
  4726. */
  4727. sector_t here_new, here_old;
  4728. int old_disks;
  4729. int max_degraded = (mddev->level == 6 ? 2 : 1);
  4730. if (mddev->new_level != mddev->level) {
  4731. printk(KERN_ERR "md/raid:%s: unsupported reshape "
  4732. "required - aborting.\n",
  4733. mdname(mddev));
  4734. return -EINVAL;
  4735. }
  4736. old_disks = mddev->raid_disks - mddev->delta_disks;
  4737. /* reshape_position must be on a new-stripe boundary, and one
4738. * stripe further on in the new geometry must map to a point after
4739. * this position in the old geometry.
  4740. */
  4741. here_new = mddev->reshape_position;
  4742. if (sector_div(here_new, mddev->new_chunk_sectors *
  4743. (mddev->raid_disks - max_degraded))) {
  4744. printk(KERN_ERR "md/raid:%s: reshape_position not "
  4745. "on a stripe boundary\n", mdname(mddev));
  4746. return -EINVAL;
  4747. }
  4748. reshape_offset = here_new * mddev->new_chunk_sectors;
  4749. /* here_new is the stripe we will write to */
  4750. here_old = mddev->reshape_position;
  4751. sector_div(here_old, mddev->chunk_sectors *
  4752. (old_disks-max_degraded));
  4753. /* here_old is the first stripe that we might need to read
  4754. * from */
  4755. if (mddev->delta_disks == 0) {
  4756. if ((here_new * mddev->new_chunk_sectors !=
  4757. here_old * mddev->chunk_sectors)) {
  4758. printk(KERN_ERR "md/raid:%s: reshape position is"
  4759. " confused - aborting\n", mdname(mddev));
  4760. return -EINVAL;
  4761. }
  4762. /* We cannot be sure it is safe to start an in-place
  4763. * reshape. It is only safe if user-space is monitoring
  4764. * and taking constant backups.
  4765. * mdadm always starts a situation like this in
  4766. * readonly mode so it can take control before
  4767. * allowing any writes. So just check for that.
  4768. */
  4769. if (abs(min_offset_diff) >= mddev->chunk_sectors &&
  4770. abs(min_offset_diff) >= mddev->new_chunk_sectors)
  4771. /* not really in-place - so OK */;
  4772. else if (mddev->ro == 0) {
  4773. printk(KERN_ERR "md/raid:%s: in-place reshape "
  4774. "must be started in read-only mode "
  4775. "- aborting\n",
  4776. mdname(mddev));
  4777. return -EINVAL;
  4778. }
  4779. } else if (mddev->reshape_backwards
  4780. ? (here_new * mddev->new_chunk_sectors + min_offset_diff <=
  4781. here_old * mddev->chunk_sectors)
  4782. : (here_new * mddev->new_chunk_sectors >=
  4783. here_old * mddev->chunk_sectors + (-min_offset_diff))) {
  4784. /* Reading from the same stripe as writing to - bad */
  4785. printk(KERN_ERR "md/raid:%s: reshape_position too early for "
  4786. "auto-recovery - aborting.\n",
  4787. mdname(mddev));
  4788. return -EINVAL;
  4789. }
  4790. printk(KERN_INFO "md/raid:%s: reshape will continue\n",
  4791. mdname(mddev));
  4792. /* OK, we should be able to continue; */
  4793. } else {
  4794. BUG_ON(mddev->level != mddev->new_level);
  4795. BUG_ON(mddev->layout != mddev->new_layout);
  4796. BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
  4797. BUG_ON(mddev->delta_disks != 0);
  4798. }
  4799. if (mddev->private == NULL)
  4800. conf = setup_conf(mddev);
  4801. else
  4802. conf = mddev->private;
  4803. if (IS_ERR(conf))
  4804. return PTR_ERR(conf);
  4805. conf->min_offset_diff = min_offset_diff;
  4806. mddev->thread = conf->thread;
  4807. conf->thread = NULL;
  4808. mddev->private = conf;
  4809. for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
  4810. i++) {
  4811. rdev = conf->disks[i].rdev;
  4812. if (!rdev && conf->disks[i].replacement) {
  4813. /* The replacement is all we have yet */
  4814. rdev = conf->disks[i].replacement;
  4815. conf->disks[i].replacement = NULL;
  4816. clear_bit(Replacement, &rdev->flags);
  4817. conf->disks[i].rdev = rdev;
  4818. }
  4819. if (!rdev)
  4820. continue;
  4821. if (conf->disks[i].replacement &&
  4822. conf->reshape_progress != MaxSector) {
  4823. /* replacements and reshape simply do not mix. */
  4824. printk(KERN_ERR "md: cannot handle concurrent "
  4825. "replacement and reshape.\n");
  4826. goto abort;
  4827. }
  4828. if (test_bit(In_sync, &rdev->flags)) {
  4829. working_disks++;
  4830. continue;
  4831. }
4832. /* This disc is not fully in-sync. However, if it
4833. * just stored parity (beyond the recovery_offset),
4834. * then we don't need to be concerned about the
  4835. * array being dirty.
  4836. * When reshape goes 'backwards', we never have
  4837. * partially completed devices, so we only need
  4838. * to worry about reshape going forwards.
  4839. */
  4840. /* Hack because v0.91 doesn't store recovery_offset properly. */
  4841. if (mddev->major_version == 0 &&
  4842. mddev->minor_version > 90)
  4843. rdev->recovery_offset = reshape_offset;
  4844. if (rdev->recovery_offset < reshape_offset) {
  4845. /* We need to check old and new layout */
  4846. if (!only_parity(rdev->raid_disk,
  4847. conf->algorithm,
  4848. conf->raid_disks,
  4849. conf->max_degraded))
  4850. continue;
  4851. }
  4852. if (!only_parity(rdev->raid_disk,
  4853. conf->prev_algo,
  4854. conf->previous_raid_disks,
  4855. conf->max_degraded))
  4856. continue;
  4857. dirty_parity_disks++;
  4858. }
  4859. /*
  4860. * 0 for a fully functional array, 1 or 2 for a degraded array.
  4861. */
  4862. mddev->degraded = calc_degraded(conf);
  4863. if (has_failed(conf)) {
  4864. printk(KERN_ERR "md/raid:%s: not enough operational devices"
  4865. " (%d/%d failed)\n",
  4866. mdname(mddev), mddev->degraded, conf->raid_disks);
  4867. goto abort;
  4868. }
  4869. /* device size must be a multiple of chunk size */
  4870. mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
  4871. mddev->resync_max_sectors = mddev->dev_sectors;
  4872. if (mddev->degraded > dirty_parity_disks &&
  4873. mddev->recovery_cp != MaxSector) {
  4874. if (mddev->ok_start_degraded)
  4875. printk(KERN_WARNING
  4876. "md/raid:%s: starting dirty degraded array"
  4877. " - data corruption possible.\n",
  4878. mdname(mddev));
  4879. else {
  4880. printk(KERN_ERR
  4881. "md/raid:%s: cannot start dirty degraded array.\n",
  4882. mdname(mddev));
  4883. goto abort;
  4884. }
  4885. }
  4886. if (mddev->degraded == 0)
  4887. printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
  4888. " devices, algorithm %d\n", mdname(mddev), conf->level,
  4889. mddev->raid_disks-mddev->degraded, mddev->raid_disks,
  4890. mddev->new_layout);
  4891. else
  4892. printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
  4893. " out of %d devices, algorithm %d\n",
  4894. mdname(mddev), conf->level,
  4895. mddev->raid_disks - mddev->degraded,
  4896. mddev->raid_disks, mddev->new_layout);
  4897. print_raid5_conf(conf);
  4898. if (conf->reshape_progress != MaxSector) {
  4899. conf->reshape_safe = conf->reshape_progress;
  4900. atomic_set(&conf->reshape_stripes, 0);
  4901. clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  4902. clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
  4903. set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
  4904. set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  4905. mddev->sync_thread = md_register_thread(md_do_sync, mddev,
  4906. "reshape");
  4907. }
  4908. /* Ok, everything is just fine now */
  4909. if (mddev->to_remove == &raid5_attrs_group)
  4910. mddev->to_remove = NULL;
  4911. else if (mddev->kobj.sd &&
  4912. sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
  4913. printk(KERN_WARNING
  4914. "raid5: failed to create sysfs attributes for %s\n",
  4915. mdname(mddev));
  4916. md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
  4917. if (mddev->queue) {
  4918. int chunk_size;
  4919. bool discard_supported = true;
  4920. /* read-ahead size must cover two whole stripes, which
4921. * is 2 * (number of data disks) * chunksize
  4923. */
  4924. int data_disks = conf->previous_raid_disks - conf->max_degraded;
  4925. int stripe = data_disks *
  4926. ((mddev->chunk_sectors << 9) / PAGE_SIZE);
  4927. if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
  4928. mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
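/* Illustrative example (hypothetical geometry): 4 data disks with 512KiB
* chunks on 4KiB pages give stripe = 4 * 128 = 512 pages, so read-ahead is
* raised to at least 1024 pages (4MiB).
*/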
  4929. blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
  4930. mddev->queue->backing_dev_info.congested_data = mddev;
  4931. mddev->queue->backing_dev_info.congested_fn = raid5_congested;
  4932. chunk_size = mddev->chunk_sectors << 9;
  4933. blk_queue_io_min(mddev->queue, chunk_size);
  4934. blk_queue_io_opt(mddev->queue, chunk_size *
  4935. (conf->raid_disks - conf->max_degraded));
  4936. /*
  4937. * We can only discard a whole stripe. It doesn't make sense to
4938. * discard the data disks but still write the parity disk
  4939. */
  4940. stripe = stripe * PAGE_SIZE;
  4941. /* Round up to power of 2, as discard handling
  4942. * currently assumes that */
  4943. while ((stripe-1) & stripe)
  4944. stripe = (stripe | (stripe-1)) + 1;
  4945. mddev->queue->limits.discard_alignment = stripe;
  4946. mddev->queue->limits.discard_granularity = stripe;
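/* Illustrative example (hypothetical geometry): 3 data disks with 512KiB
* chunks give stripe = 1536KiB, which the loop above rounds up to the next
* power of two, 2MiB, so discard alignment and granularity both become 2MiB.
*/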
  4947. /*
4948. * the unaligned part of a discard request will be ignored, so we can't
4949. * guarantee discard_zeroes_data
  4950. */
  4951. mddev->queue->limits.discard_zeroes_data = 0;
  4952. rdev_for_each(rdev, mddev) {
  4953. disk_stack_limits(mddev->gendisk, rdev->bdev,
  4954. rdev->data_offset << 9);
  4955. disk_stack_limits(mddev->gendisk, rdev->bdev,
  4956. rdev->new_data_offset << 9);
  4957. /*
  4958. * discard_zeroes_data is required, otherwise data
  4959. * could be lost. Consider a scenario: discard a stripe
  4960. * (the stripe could be inconsistent if
  4961. * discard_zeroes_data is 0); write one disk of the
  4962. * stripe (the stripe could be inconsistent again
  4963. * depending on which disks are used to calculate
  4964. * parity); the disk is broken; The stripe data of this
  4965. * disk is lost.
  4966. */
  4967. if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) ||
  4968. !bdev_get_queue(rdev->bdev)->
  4969. limits.discard_zeroes_data)
  4970. discard_supported = false;
  4971. }
  4972. if (discard_supported &&
  4973. mddev->queue->limits.max_discard_sectors >= stripe &&
  4974. mddev->queue->limits.discard_granularity >= stripe)
  4975. queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
  4976. mddev->queue);
  4977. else
  4978. queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
  4979. mddev->queue);
  4980. }
  4981. return 0;
  4982. abort:
  4983. md_unregister_thread(&mddev->thread);
  4984. print_raid5_conf(conf);
  4985. free_conf(conf);
  4986. mddev->private = NULL;
  4987. printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
  4988. return -EIO;
  4989. }
  4990. static int stop(struct mddev *mddev)
  4991. {
  4992. struct r5conf *conf = mddev->private;
  4993. md_unregister_thread(&mddev->thread);
  4994. if (mddev->queue)
  4995. mddev->queue->backing_dev_info.congested_fn = NULL;
  4996. free_conf(conf);
  4997. mddev->private = NULL;
  4998. mddev->to_remove = &raid5_attrs_group;
  4999. return 0;
  5000. }
  5001. static void status(struct seq_file *seq, struct mddev *mddev)
  5002. {
  5003. struct r5conf *conf = mddev->private;
  5004. int i;
  5005. seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
  5006. mddev->chunk_sectors / 2, mddev->layout);
  5007. seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
  5008. for (i = 0; i < conf->raid_disks; i++)
  5009. seq_printf (seq, "%s",
  5010. conf->disks[i].rdev &&
  5011. test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
  5012. seq_printf (seq, "]");
  5013. }
  5014. static void print_raid5_conf (struct r5conf *conf)
  5015. {
  5016. int i;
  5017. struct disk_info *tmp;
  5018. printk(KERN_DEBUG "RAID conf printout:\n");
  5019. if (!conf) {
  5020. printk("(conf==NULL)\n");
  5021. return;
  5022. }
  5023. printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
  5024. conf->raid_disks,
  5025. conf->raid_disks - conf->mddev->degraded);
  5026. for (i = 0; i < conf->raid_disks; i++) {
  5027. char b[BDEVNAME_SIZE];
  5028. tmp = conf->disks + i;
  5029. if (tmp->rdev)
  5030. printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
  5031. i, !test_bit(Faulty, &tmp->rdev->flags),
  5032. bdevname(tmp->rdev->bdev, b));
  5033. }
  5034. }
  5035. static int raid5_spare_active(struct mddev *mddev)
  5036. {
  5037. int i;
  5038. struct r5conf *conf = mddev->private;
  5039. struct disk_info *tmp;
  5040. int count = 0;
  5041. unsigned long flags;
  5042. for (i = 0; i < conf->raid_disks; i++) {
  5043. tmp = conf->disks + i;
  5044. if (tmp->replacement
  5045. && tmp->replacement->recovery_offset == MaxSector
  5046. && !test_bit(Faulty, &tmp->replacement->flags)
  5047. && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
  5048. /* Replacement has just become active. */
  5049. if (!tmp->rdev
  5050. || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
  5051. count++;
  5052. if (tmp->rdev) {
  5053. /* Replaced device not technically faulty,
  5054. * but we need to be sure it gets removed
  5055. * and never re-added.
  5056. */
  5057. set_bit(Faulty, &tmp->rdev->flags);
  5058. sysfs_notify_dirent_safe(
  5059. tmp->rdev->sysfs_state);
  5060. }
  5061. sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
  5062. } else if (tmp->rdev
  5063. && tmp->rdev->recovery_offset == MaxSector
  5064. && !test_bit(Faulty, &tmp->rdev->flags)
  5065. && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
  5066. count++;
  5067. sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
  5068. }
  5069. }
  5070. spin_lock_irqsave(&conf->device_lock, flags);
  5071. mddev->degraded = calc_degraded(conf);
  5072. spin_unlock_irqrestore(&conf->device_lock, flags);
  5073. print_raid5_conf(conf);
  5074. return count;
  5075. }
  5076. static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
  5077. {
  5078. struct r5conf *conf = mddev->private;
  5079. int err = 0;
  5080. int number = rdev->raid_disk;
  5081. struct md_rdev **rdevp;
  5082. struct disk_info *p = conf->disks + number;
  5083. print_raid5_conf(conf);
  5084. if (rdev == p->rdev)
  5085. rdevp = &p->rdev;
  5086. else if (rdev == p->replacement)
  5087. rdevp = &p->replacement;
  5088. else
  5089. return 0;
  5090. if (number >= conf->raid_disks &&
  5091. conf->reshape_progress == MaxSector)
  5092. clear_bit(In_sync, &rdev->flags);
  5093. if (test_bit(In_sync, &rdev->flags) ||
  5094. atomic_read(&rdev->nr_pending)) {
  5095. err = -EBUSY;
  5096. goto abort;
  5097. }
  5098. /* Only remove non-faulty devices if recovery
  5099. * isn't possible.
  5100. */
  5101. if (!test_bit(Faulty, &rdev->flags) &&
  5102. mddev->recovery_disabled != conf->recovery_disabled &&
  5103. !has_failed(conf) &&
  5104. (!p->replacement || p->replacement == rdev) &&
  5105. number < conf->raid_disks) {
  5106. err = -EBUSY;
  5107. goto abort;
  5108. }
  5109. *rdevp = NULL;
  5110. synchronize_rcu();
  5111. if (atomic_read(&rdev->nr_pending)) {
  5112. /* lost the race, try later */
  5113. err = -EBUSY;
  5114. *rdevp = rdev;
  5115. } else if (p->replacement) {
  5116. /* We must have just cleared 'rdev' */
  5117. p->rdev = p->replacement;
  5118. clear_bit(Replacement, &p->replacement->flags);
  5119. smp_mb(); /* Make sure other CPUs may see both as identical
  5120. * but will never see neither - if they are careful
  5121. */
  5122. p->replacement = NULL;
  5123. clear_bit(WantReplacement, &rdev->flags);
  5124. } else
5125. /* We might have just removed the Replacement as faulty -
  5126. * clear the bit just in case
  5127. */
  5128. clear_bit(WantReplacement, &rdev->flags);
  5129. abort:
  5130. print_raid5_conf(conf);
  5131. return err;
  5132. }
  5133. static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
  5134. {
  5135. struct r5conf *conf = mddev->private;
  5136. int err = -EEXIST;
  5137. int disk;
  5138. struct disk_info *p;
  5139. int first = 0;
  5140. int last = conf->raid_disks - 1;
  5141. if (mddev->recovery_disabled == conf->recovery_disabled)
  5142. return -EBUSY;
  5143. if (rdev->saved_raid_disk < 0 && has_failed(conf))
  5144. /* no point adding a device */
  5145. return -EINVAL;
  5146. if (rdev->raid_disk >= 0)
  5147. first = last = rdev->raid_disk;
  5148. /*
  5149. * find the disk ... but prefer rdev->saved_raid_disk
  5150. * if possible.
  5151. */
  5152. if (rdev->saved_raid_disk >= 0 &&
  5153. rdev->saved_raid_disk >= first &&
  5154. conf->disks[rdev->saved_raid_disk].rdev == NULL)
  5155. first = rdev->saved_raid_disk;
  5156. for (disk = first; disk <= last; disk++) {
  5157. p = conf->disks + disk;
  5158. if (p->rdev == NULL) {
  5159. clear_bit(In_sync, &rdev->flags);
  5160. rdev->raid_disk = disk;
  5161. err = 0;
  5162. if (rdev->saved_raid_disk != disk)
  5163. conf->fullsync = 1;
  5164. rcu_assign_pointer(p->rdev, rdev);
  5165. goto out;
  5166. }
  5167. }
  5168. for (disk = first; disk <= last; disk++) {
  5169. p = conf->disks + disk;
  5170. if (test_bit(WantReplacement, &p->rdev->flags) &&
  5171. p->replacement == NULL) {
  5172. clear_bit(In_sync, &rdev->flags);
  5173. set_bit(Replacement, &rdev->flags);
  5174. rdev->raid_disk = disk;
  5175. err = 0;
  5176. conf->fullsync = 1;
  5177. rcu_assign_pointer(p->replacement, rdev);
  5178. break;
  5179. }
  5180. }
  5181. out:
  5182. print_raid5_conf(conf);
  5183. return err;
  5184. }
  5185. static int raid5_resize(struct mddev *mddev, sector_t sectors)
  5186. {
  5187. /* no resync is happening, and there is enough space
  5188. * on all devices, so we can resize.
  5189. * We need to make sure resync covers any new space.
  5190. * If the array is shrinking we should possibly wait until
  5191. * any io in the removed space completes, but it hardly seems
  5192. * worth it.
  5193. */
  5194. sector_t newsize;
  5195. sectors &= ~((sector_t)mddev->chunk_sectors - 1);
  5196. newsize = raid5_size(mddev, sectors, mddev->raid_disks);
  5197. if (mddev->external_size &&
  5198. mddev->array_sectors > newsize)
  5199. return -EINVAL;
  5200. if (mddev->bitmap) {
  5201. int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
  5202. if (ret)
  5203. return ret;
  5204. }
  5205. md_set_array_sectors(mddev, newsize);
  5206. set_capacity(mddev->gendisk, mddev->array_sectors);
  5207. revalidate_disk(mddev->gendisk);
  5208. if (sectors > mddev->dev_sectors &&
  5209. mddev->recovery_cp > mddev->dev_sectors) {
  5210. mddev->recovery_cp = mddev->dev_sectors;
  5211. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  5212. }
  5213. mddev->dev_sectors = sectors;
  5214. mddev->resync_max_sectors = sectors;
  5215. return 0;
  5216. }
  5217. static int check_stripe_cache(struct mddev *mddev)
  5218. {
  5219. /* Can only proceed if there are plenty of stripe_heads.
5220. * We need a minimum of one full stripe, and for sensible progress
  5221. * it is best to have about 4 times that.
  5222. * If we require 4 times, then the default 256 4K stripe_heads will
  5223. * allow for chunk sizes up to 256K, which is probably OK.
  5224. * If the chunk size is greater, user-space should request more
  5225. * stripe_heads first.
  5226. */
  5227. struct r5conf *conf = mddev->private;
  5228. if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
  5229. > conf->max_nr_stripes ||
  5230. ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
  5231. > conf->max_nr_stripes) {
  5232. printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
  5233. mdname(mddev),
  5234. ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
  5235. / STRIPE_SIZE)*4);
  5236. return 0;
  5237. }
  5238. return 1;
  5239. }
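/* Hypothetical example of the check above: with 4KiB stripe_heads
* (STRIPE_SIZE) and a 1MiB chunk, (1MiB / 4KiB) * 4 = 1024, so
* stripe_cache_size must be raised to at least 1024 before such a reshape
* can start; the default 256 only covers chunks up to 256KiB.
*/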
  5240. static int check_reshape(struct mddev *mddev)
  5241. {
  5242. struct r5conf *conf = mddev->private;
  5243. if (mddev->delta_disks == 0 &&
  5244. mddev->new_layout == mddev->layout &&
  5245. mddev->new_chunk_sectors == mddev->chunk_sectors)
  5246. return 0; /* nothing to do */
  5247. if (has_failed(conf))
  5248. return -EINVAL;
  5249. if (mddev->delta_disks < 0) {
  5250. /* We might be able to shrink, but the devices must
  5251. * be made bigger first.
5252. * For raid6, 4 is the minimum number of devices.
5253. * Otherwise 2 is the minimum.
  5254. */
  5255. int min = 2;
  5256. if (mddev->level == 6)
  5257. min = 4;
  5258. if (mddev->raid_disks + mddev->delta_disks < min)
  5259. return -EINVAL;
  5260. }
  5261. if (!check_stripe_cache(mddev))
  5262. return -ENOSPC;
  5263. return resize_stripes(conf, (conf->previous_raid_disks
  5264. + mddev->delta_disks));
  5265. }
  5266. static int raid5_start_reshape(struct mddev *mddev)
  5267. {
  5268. struct r5conf *conf = mddev->private;
  5269. struct md_rdev *rdev;
  5270. int spares = 0;
  5271. unsigned long flags;
  5272. if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
  5273. return -EBUSY;
  5274. if (!check_stripe_cache(mddev))
  5275. return -ENOSPC;
  5276. if (has_failed(conf))
  5277. return -EINVAL;
  5278. rdev_for_each(rdev, mddev) {
  5279. if (!test_bit(In_sync, &rdev->flags)
  5280. && !test_bit(Faulty, &rdev->flags))
  5281. spares++;
  5282. }
  5283. if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
  5284. /* Not enough devices even to make a degraded array
  5285. * of that size
  5286. */
  5287. return -EINVAL;
  5288. /* Refuse to reduce size of the array. Any reductions in
  5289. * array size must be through explicit setting of array_size
  5290. * attribute.
  5291. */
  5292. if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
  5293. < mddev->array_sectors) {
  5294. printk(KERN_ERR "md/raid:%s: array size must be reduced "
  5295. "before number of disks\n", mdname(mddev));
  5296. return -EINVAL;
  5297. }
  5298. atomic_set(&conf->reshape_stripes, 0);
  5299. spin_lock_irq(&conf->device_lock);
  5300. conf->previous_raid_disks = conf->raid_disks;
  5301. conf->raid_disks += mddev->delta_disks;
  5302. conf->prev_chunk_sectors = conf->chunk_sectors;
  5303. conf->chunk_sectors = mddev->new_chunk_sectors;
  5304. conf->prev_algo = conf->algorithm;
  5305. conf->algorithm = mddev->new_layout;
  5306. conf->generation++;
  5307. /* Code that selects data_offset needs to see the generation update
  5308. * if reshape_progress has been set - so a memory barrier needed.
  5309. */
  5310. smp_mb();
  5311. if (mddev->reshape_backwards)
  5312. conf->reshape_progress = raid5_size(mddev, 0, 0);
  5313. else
  5314. conf->reshape_progress = 0;
  5315. conf->reshape_safe = conf->reshape_progress;
  5316. spin_unlock_irq(&conf->device_lock);
  5317. /* Add some new drives, as many as will fit.
  5318. * We know there are enough to make the newly sized array work.
  5319. * Don't add devices if we are reducing the number of
  5320. * devices in the array. This is because it is not possible
  5321. * to correctly record the "partially reconstructed" state of
  5322. * such devices during the reshape and confusion could result.
  5323. */
  5324. if (mddev->delta_disks >= 0) {
  5325. rdev_for_each(rdev, mddev)
  5326. if (rdev->raid_disk < 0 &&
  5327. !test_bit(Faulty, &rdev->flags)) {
  5328. if (raid5_add_disk(mddev, rdev) == 0) {
  5329. if (rdev->raid_disk
  5330. >= conf->previous_raid_disks)
  5331. set_bit(In_sync, &rdev->flags);
  5332. else
  5333. rdev->recovery_offset = 0;
  5334. if (sysfs_link_rdev(mddev, rdev))
  5335. /* Failure here is OK */;
  5336. }
  5337. } else if (rdev->raid_disk >= conf->previous_raid_disks
  5338. && !test_bit(Faulty, &rdev->flags)) {
  5339. /* This is a spare that was manually added */
  5340. set_bit(In_sync, &rdev->flags);
  5341. }
  5342. /* When a reshape changes the number of devices,
  5343. * ->degraded is measured against the larger of the
  5344. * pre and post number of devices.
  5345. */
  5346. spin_lock_irqsave(&conf->device_lock, flags);
  5347. mddev->degraded = calc_degraded(conf);
  5348. spin_unlock_irqrestore(&conf->device_lock, flags);
  5349. }
  5350. mddev->raid_disks = conf->raid_disks;
  5351. mddev->reshape_position = conf->reshape_progress;
  5352. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  5353. clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  5354. clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
  5355. set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
  5356. set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  5357. mddev->sync_thread = md_register_thread(md_do_sync, mddev,
  5358. "reshape");
  5359. if (!mddev->sync_thread) {
  5360. mddev->recovery = 0;
  5361. spin_lock_irq(&conf->device_lock);
  5362. mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
  5363. rdev_for_each(rdev, mddev)
  5364. rdev->new_data_offset = rdev->data_offset;
  5365. smp_wmb();
  5366. conf->reshape_progress = MaxSector;
  5367. mddev->reshape_position = MaxSector;
  5368. spin_unlock_irq(&conf->device_lock);
  5369. return -EAGAIN;
  5370. }
  5371. conf->reshape_checkpoint = jiffies;
  5372. md_wakeup_thread(mddev->sync_thread);
  5373. md_new_event(mddev);
  5374. return 0;
  5375. }
  5376. /* This is called from the reshape thread and should make any
  5377. * changes needed in 'conf'
  5378. */
  5379. static void end_reshape(struct r5conf *conf)
  5380. {
  5381. if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
  5382. struct md_rdev *rdev;
  5383. spin_lock_irq(&conf->device_lock);
  5384. conf->previous_raid_disks = conf->raid_disks;
  5385. rdev_for_each(rdev, conf->mddev)
  5386. rdev->data_offset = rdev->new_data_offset;
  5387. smp_wmb();
  5388. conf->reshape_progress = MaxSector;
  5389. spin_unlock_irq(&conf->device_lock);
  5390. wake_up(&conf->wait_for_overlap);
  5391. /* read-ahead size must cover two whole stripes, which is
5392. * 2 * (number of data disks) * chunksize
  5393. */
  5394. if (conf->mddev->queue) {
  5395. int data_disks = conf->raid_disks - conf->max_degraded;
  5396. int stripe = data_disks * ((conf->chunk_sectors << 9)
  5397. / PAGE_SIZE);
  5398. if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
  5399. conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
  5400. }
  5401. }
  5402. }
/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {

		if (mddev->delta_disks > 0) {
			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
			set_capacity(mddev->gendisk, mddev->array_sectors);
			revalidate_disk(mddev->gendisk);
		} else {
			int d;
			spin_lock_irq(&conf->device_lock);
			mddev->degraded = calc_degraded(conf);
			spin_unlock_irq(&conf->device_lock);
			for (d = conf->raid_disks ;
			     d < conf->raid_disks - mddev->delta_disks;
			     d++) {
				struct md_rdev *rdev = conf->disks[d].rdev;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
				rdev = conf->disks[d].replacement;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
			}
		}
		mddev->layout = conf->algorithm;
		mddev->chunk_sectors = conf->chunk_sectors;
		mddev->reshape_position = MaxSector;
		mddev->delta_disks = 0;
		mddev->reshape_backwards = 0;
	}
}
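
/* Example: shrinking a 5-device array to 4 reaches the else-branch
 * above with conf->raid_disks == 4 and mddev->delta_disks == -1, so
 * the loop walks d == 4 only and clears In_sync on the device (and
 * any replacement) occupying the now-unused slot.
 */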
static void raid5_quiesce(struct mddev *mddev, int state)
{
	struct r5conf *conf = mddev->private;

	switch(state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		/* '2' tells resync/reshape to pause so that all
		 * active stripes can drain
		 */
		conf->quiesce = 2;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0 &&
				    atomic_read(&conf->active_aligned_reads) == 0,
				    conf->device_lock);
		conf->quiesce = 1;
		spin_unlock_irq(&conf->device_lock);
		/* allow reshape to continue */
		wake_up(&conf->wait_for_overlap);
		break;

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		wake_up(&conf->wait_for_overlap);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}
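
/* Typical caller pairing (a sketch of the md-core path, assumed
 * rather than shown in this file):
 *
 *	mddev_suspend(mddev);	calls ->quiesce(mddev, 1)
 *	... safely modify conf ...
 *	mddev_resume(mddev);	calls ->quiesce(mddev, 0)
 *
 * so once case 1 returns, no stripes or aligned reads are in flight.
 */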
static void *raid45_takeover_raid0(struct mddev *mddev, int level)
{
	struct r0conf *raid0_conf = mddev->private;
	sector_t sectors;

	/* for raid0 takeover only one zone is supported */
	if (raid0_conf->nr_strip_zones > 1) {
		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	sectors = raid0_conf->strip_zone[0].zone_end;
	sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
	mddev->dev_sectors = sectors;
	mddev->new_level = level;
	mddev->new_layout = ALGORITHM_PARITY_N;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks += 1;
	mddev->delta_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	return setup_conf(mddev);
}
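
/* Example: taking over a 2-device raid0 whose single zone ends at
 * 2097152 array sectors gives 1048576 sectors (512 MiB) per device;
 * the result is a 3-device raid4/5 in PARITY_N layout that comes up
 * degraded until a spare is added and rebuilt for the new parity
 * slot.
 */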
static void *raid5_takeover_raid1(struct mddev *mddev)
{
	int chunksect;

	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);

	/* Should check if there are write-behind devices? */

	chunksect = 64*2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect-1)))
		chunksect >>= 1;

	if ((chunksect<<9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk_sectors = chunksect;

	return setup_conf(mddev);
}
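
/* Example: with array_sectors == 1048576 (512 MiB) the loop keeps
 * chunksect at 128 sectors (64 KiB), since 1048576 is a multiple of
 * 128.  With array_sectors == 1048580 it halves down to 4 sectors
 * (2 KiB), which is below STRIPE_SIZE on 4 KiB-page systems, so the
 * takeover is refused with -EINVAL.
 */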
static void *raid5_takeover_raid6(struct mddev *mddev)
{
	int new_layout;

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
		break;
	case ALGORITHM_LEFT_SYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
		break;
	case ALGORITHM_PARITY_0_6:
		new_layout = ALGORITHM_PARITY_0;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
}
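
/* The *_6 layouts accepted above are the raid6 variants that keep the
 * Q syndrome on the last device, so dropping that one device yields a
 * valid raid5 in the corresponding layout without restriping; any
 * other raid6 layout would need a full restripe and is rejected.
 */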
static int raid5_check_reshape(struct mddev *mddev)
{
	/* For a 2-drive array, the layout and chunk size can be changed
	 * immediately as no restriping is needed.
	 * For larger arrays we record the new value - after validation
	 * to be used by a reshape pass.
	 */
	struct r5conf *conf = mddev->private;
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE>>9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */

	if (mddev->raid_disks == 2) {
		/* can make the change immediately */
		if (mddev->new_layout >= 0) {
			conf->algorithm = mddev->new_layout;
			mddev->layout = mddev->new_layout;
		}
		if (new_chunk > 0) {
			conf->chunk_sectors = new_chunk;
			mddev->chunk_sectors = new_chunk;
		}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return check_reshape(mddev);
}
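
/* Example: new_chunk is in 512-byte sectors, so a 128 KiB chunk means
 * new_chunk == 256.  A 384 KiB request (768 sectors) fails the
 * is_power_of_2() test, and anything smaller than one page (8 sectors
 * with 4 KiB pages) is also rejected.  From user space this is
 * typically reached via, e.g.,
 *
 *	mdadm --grow /dev/md0 --chunk=128
 */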
static int raid6_check_reshape(struct mddev *mddev)
{
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE >> 9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */
	return check_reshape(mddev);
}
static void *raid5_takeover(struct mddev *mddev)
{
	/* raid5 can take over:
	 *  raid0 - if there is only one strip zone - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - Providing it is a *_6 layout
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 5);
	if (mddev->level == 1)
		return raid5_takeover_raid1(mddev);
	if (mddev->level == 4) {
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	}
	if (mddev->level == 6)
		return raid5_takeover_raid6(mddev);

	return ERR_PTR(-EINVAL);
}
static void *raid4_takeover(struct mddev *mddev)
{
	/* raid4 can take over:
	 *  raid0 - if there is only one strip zone
	 *  raid5 - if layout is right
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 4);
	if (mddev->level == 5 &&
	    mddev->layout == ALGORITHM_PARITY_N) {
		mddev->new_layout = 0;
		mddev->new_level = 4;
		return setup_conf(mddev);
	}
	return ERR_PTR(-EINVAL);
}
static struct md_personality raid5_personality;

static void *raid6_takeover(struct mddev *mddev)
{
	/* Currently can only take over a raid5.  We map the
	 * personality to an equivalent raid6 personality
	 * with the Q block at the end.
	 */
	int new_layout;

	if (mddev->pers != &raid5_personality)
		return ERR_PTR(-EINVAL);
	if (mddev->degraded > 1)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks > 253)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks < 3)
		return ERR_PTR(-EINVAL);

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
		break;
	case ALGORITHM_LEFT_SYMMETRIC:
		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
		break;
	case ALGORITHM_PARITY_0:
		new_layout = ALGORITHM_PARITY_0_6;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 6;
	mddev->new_layout = new_layout;
	mddev->delta_disks = 1;
	mddev->raid_disks += 1;
	return setup_conf(mddev);
}
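
/* Example: converting a healthy 4-device raid5 in the default
 * left-symmetric layout yields a 5-device raid6 in LEFT_SYMMETRIC_6
 * (Q on the new last device), typically requested with something like
 *
 *	mdadm --grow /dev/md0 --level=6 --raid-devices=5
 */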
static struct md_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid6_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid6_takeover,
};
static struct md_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid5_takeover,
};
static struct md_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid4_takeover,
};
static int __init raid5_init(void)
{
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, named: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");