/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"

#ifndef MODULE
static void autostart_arrays(int part);
#endif
/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock also protects accesses to mddev->thread
 * when the reconfig mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
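
/*
 * Usage sketch (illustrative, not part of the driver): the resync loop
 * throttles itself against these helpers, roughly
 *
 *	if (currspeed > speed_min(mddev))
 *		...back off unless the IO subsystem is idle...
 *
 * and an administrator can raise the system-wide floor at runtime with
 *
 *	echo 50000 > /proc/sys/dev/raid/speed_limit_min
 *
 * while a per-array override goes through
 * /sys/block/mdX/md/sync_speed_min.  'currspeed' is a hypothetical
 * name for the current resync throughput in KB/sec.
 */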
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};
static const struct block_device_operations md_fops;

static int start_readonly;

/* bio_clone_mddev
 * like bio_clone, but with a local bio set
 */

static void mddev_bio_destructor(struct bio *bio)
{
	struct mddev *mddev, **mddevp;

	mddevp = (void*)bio;
	mddev = mddevp[-1];

	bio_free(bio, mddev->bio_set);
}

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    struct mddev *mddev)
{
	struct bio *b;
	struct mddev **mddevp;

	if (!mddev || !mddev->bio_set)
		return bio_alloc(gfp_mask, nr_iovecs);

	b = bio_alloc_bioset(gfp_mask, nr_iovecs,
			     mddev->bio_set);
	if (!b)
		return NULL;
	mddevp = (void*)b;
	mddevp[-1] = mddev;
	b->bi_destructor = mddev_bio_destructor;
	return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
			    struct mddev *mddev)
{
	struct bio *b;
	struct mddev **mddevp;

	if (!mddev || !mddev->bio_set)
		return bio_clone(bio, gfp_mask);

	b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
			     mddev->bio_set);
	if (!b)
		return NULL;
	mddevp = (void*)b;
	mddevp[-1] = mddev;
	b->bi_destructor = mddev_bio_destructor;
	__bio_clone(b, bio);
	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask, mddev->bio_set);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);
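
/*
 * Usage sketch (illustrative): a personality that needs a private copy
 * of an incoming bio can clone it from the array's bio_set, so the
 * allocation cannot deadlock against the global bio pool.  'rbio' and
 * 'my_end_io' are hypothetical names:
 *
 *	struct bio *rbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
 *
 *	if (!rbio)
 *		return -ENOMEM;
 *	rbio->bi_end_io = my_end_io;
 *	generic_make_request(rbio);
 */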
void md_trim_bio(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 * This requires adjusting bi_sector, bi_size, and bi_io_vec
	 */
	int i;
	struct bio_vec *bvec;
	int sofar = 0;

	size <<= 9;
	if (offset == 0 && size == bio->bi_size)
		return;

	bio->bi_sector += offset;
	bio->bi_size = size;
	offset <<= 9;
	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	while (bio->bi_idx < bio->bi_vcnt &&
	       bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
		/* remove this whole bio_vec */
		offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
		bio->bi_idx++;
	}
	if (bio->bi_idx < bio->bi_vcnt) {
		bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
		bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
	}
	/* avoid any complications with bi_idx being non-zero */
	if (bio->bi_idx) {
		memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
			(bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
		bio->bi_vcnt -= bio->bi_idx;
		bio->bi_idx = 0;
	}
	/* Make sure vcnt and last bv are not too big */
	bio_for_each_segment(bvec, bio, i) {
		if (sofar + bvec->bv_len > size)
			bvec->bv_len = size - sofar;
		if (bvec->bv_len == 0) {
			bio->bi_vcnt = i;
			break;
		}
		sofar += bvec->bv_len;
	}
}
EXPORT_SYMBOL_GPL(md_trim_bio);
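
/*
 * Usage sketch (illustrative): restrict a clone to the middle of the
 * original request, e.g. the 8 sectors starting 16 sectors in:
 *
 *	struct bio *part = bio_clone_mddev(bio, GFP_NOIO, mddev);
 *
 *	if (part)
 *		md_trim_bio(part, 16, 8);
 *
 * Both arguments are in 512-byte sectors; md_trim_bio converts them to
 * bytes internally (the '<<= 9' shifts above).
 */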
/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
/*
 * Enables iteration over all existing md arrays;
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
		  mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
		)
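
/*
 * Usage sketch (illustrative): count the running arrays.  Because of
 * the refcount protocol above, breaking out of the loop early leaves
 * the caller holding a reference which it must mddev_put():
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *	int running = 0;
 *
 *	for_each_mddev(mddev, tmp)
 *		if (mddev->pers)
 *			running++;
 */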
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request. By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static void md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	struct mddev *mddev = q->queuedata;
	int cpu;
	unsigned int sectors;

	if (mddev == NULL || mddev->pers == NULL
	    || !mddev->ready) {
		bio_io_error(bio);
		return;
	}
	smp_rmb(); /* Ensure implications of 'active' are visible */
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	/*
	 * save the sectors now since our bio can
	 * go away inside make_request
	 */
	sectors = bio_sectors(bio);
	mddev->pers->make_request(mddev, bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
	part_stat_unlock();

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
}
/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once ->stop is called and completes, the module will be completely
 * unused.
 */
void mddev_suspend(struct mddev *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
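
/*
 * Usage sketch (illustrative): callers bracket a reconfiguration so no
 * IO is in flight while array state changes.  The middle step is
 * hypothetical:
 *
 *	mddev_suspend(mddev);
 *	...swap personality data, resize, etc...
 *	mddev_resume(mddev);
 *
 * mddev_suspend() only returns once active_io has drained to zero and
 * the personality has acknowledged ->quiesce(mddev, 1).
 */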
int mddev_congested(struct mddev *mddev, int bits)
{
	return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio, int err)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when the request finishes, one after
			 * we re-take the rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_FLUSH, bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	if (bio->bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio, 0);
	else {
		bio->bi_rw &= ~REQ_FLUSH;
		mddev->pers->make_request(mddev, bio);
	}

	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);
}

void md_flush_request(struct mddev *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio,
			    mddev->write_lock, /*nothing*/);
	mddev->flush_bio = bio;
	spin_unlock_irq(&mddev->write_lock);

	INIT_WORK(&mddev->flush_work, submit_flushes);
	queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
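
/*
 * Usage sketch (illustrative): a personality's make_request typically
 * diverts flush requests here before doing its own mapping, e.g.
 *
 *	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 *		md_flush_request(mddev, bio);
 *		return;
 *	}
 *
 * md_flush_request() then flushes every component rdev and re-submits
 * the bio, with REQ_FLUSH stripped, through ->make_request.
 */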
/* Support for plugging.
 * This mirrors the plugging support in request_queue, but does not
 * require having a whole queue or request structures.
 * We allocate an md_plug_cb for each md device and each thread it gets
 * plugged on.  This links to the private plug_handle structure in the
 * personality data where we keep a count of the number of outstanding
 * plugs so other code can see if a plug is active.
 */
struct md_plug_cb {
	struct blk_plug_cb cb;
	struct mddev *mddev;
};

static void plugger_unplug(struct blk_plug_cb *cb)
{
	struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
	if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
		md_wakeup_thread(mdcb->mddev->thread);
	kfree(mdcb);
}

/* Check that an unplug wakeup will come shortly.
 * If not, wake up the md thread immediately
 */
int mddev_check_plugged(struct mddev *mddev)
{
	struct blk_plug *plug = current->plug;
	struct md_plug_cb *mdcb;

	if (!plug)
		return 0;

	list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
		if (mdcb->cb.callback == plugger_unplug &&
		    mdcb->mddev == mddev) {
			/* Already on the list, move to top */
			if (mdcb != list_first_entry(&plug->cb_list,
						     struct md_plug_cb,
						     cb.list))
				list_move(&mdcb->cb.list, &plug->cb_list);
			return 1;
		}
	}
	/* Not currently on the callback list */
	mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
	if (!mdcb)
		return 0;

	mdcb->mddev = mddev;
	mdcb->cb.callback = plugger_unplug;
	atomic_inc(&mddev->plug_cnt);
	list_add(&mdcb->cb.list, &plug->cb_list);
	return 1;
}
EXPORT_SYMBOL_GPL(mddev_check_plugged);
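
/*
 * Usage sketch (illustrative): a personality that queues work for its
 * daemon thread can skip the wakeup when an unplug callback is pending:
 *
 *	if (!mddev_check_plugged(mddev))
 *		md_wakeup_thread(mddev->thread);
 *
 * If the current task has a plug active, plugger_unplug() delivers the
 * wakeup when the plug is released.
 */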
static inline struct mddev *mddev_get(struct mddev *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
	struct bio_set *bs = NULL;

	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del_init(&mddev->all_mddevs);
		bs = mddev->bio_set;
		mddev->bio_set = NULL;
		if (mddev->gendisk) {
			/* We did a probe so need to clean up.  Call
			 * queue_work inside the spinlock so that
			 * flush_workqueue() after mddev_find will
			 * succeed in waiting for the work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			queue_work(md_misc_wq, &mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
	if (bs)
		bioset_free(bs);
}

void mddev_init(struct mddev *mddev)
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	init_timer(&mddev->safemode_timer);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	atomic_set(&mddev->plug_cnt, 0);
	spin_lock_init(&mddev->write_lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->reshape_backwards = 0;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);
static struct mddev * mddev_find(dev_t unit)
{
	struct mddev *mddev, *new = NULL;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}
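
/*
 * Usage sketch (illustrative): mddev_find() returns with a reference
 * held (on a fresh array or an existing one), so every caller must
 * balance it with mddev_put():
 *
 *	struct mddev *mddev = mddev_find(dev);
 *
 *	if (!mddev)
 *		return -ENOMEM;
 *	...use the array...
 *	mddev_put(mddev);
 *
 * Passing unit == 0 asks for a brand new array with an unused minor
 * (>= 512), which is then held active UNTIL_STOP.
 */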
static inline int mddev_lock(struct mddev * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(struct mddev *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static struct attribute_group md_redundancy_group;

static void mddev_unlock(struct mddev * mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So hold sysfs_active set while the removal is happening,
		 * and anything else which might set ->to_remove or
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	spin_unlock(&pers_lock);
}
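
/*
 * Usage sketch (illustrative): reconfiguration paths take the mutex
 * interruptibly, so a signal can abort a stuck caller:
 *
 *	err = mddev_lock(mddev);
 *	if (err)
 *		return err;
 *	...modify array state under reconfig_mutex...
 *	mddev_unlock(mddev);
 */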
static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static struct md_rdev * find_rdev(struct mddev * mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}
/* return the offset of the super block in 512-byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

static int alloc_disk_sb(struct md_rdev *rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}
void md_rdev_clear(struct md_rdev *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
	kfree(rdev->badblocks.page);
	rdev->badblocks.page = NULL;
}
EXPORT_SYMBOL_GPL(md_rdev_clear);
static void super_written(struct bio *bio, int error)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}
void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);

	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	atomic_inc(&mddev->pending_writes);
	submit_bio(WRITE_FLUSH_FUA, bio);
}
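/*
 * Editorial note (not from the original source): WRITE_FLUSH_FUA issues
 * a preflush so previously completed writes reach stable media first,
 * and marks the superblock write itself as forced-unit-access, so an
 * updated event count can never be durable before the data it describes.
 */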
void md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	DEFINE_WAIT(wq);
	for (;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes) == 0)
			break;
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}
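/*
 * Editorial note (not from the original source): the open-coded loop
 * above is the standard prepare_to_wait()/schedule() idiom and is
 * functionally equivalent to
 *
 *	wait_event(mddev->sb_wait,
 *		   atomic_read(&mddev->pending_writes) == 0);
 *
 * which also sleeps in TASK_UNINTERRUPTIBLE.
 */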
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion *)bio->bi_private);
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int rw, bool metadata_op)
{
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
	struct completion event;
	int ret;

	rw |= REQ_SYNC;

	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
		rdev->meta_bdev : rdev->bdev;
	if (metadata_op)
		bio->bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio->bi_sector = sector + rdev->new_data_offset;
	else
		bio->bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
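/*
 * Illustrative call (editorial, not from the original source;
 * read_disk_sb() below is the real in-tree user): synchronously read 4K
 * of metadata starting at the superblock area and check for success:
 *
 *	if (!sync_page_io(rdev, 0, 4096, rdev->sb_page, READ, true))
 *		return -EIO;
 */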
static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev, b));
	return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1), GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2), GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
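/*
 * Worked example (editorial, not from the original source): folding
 * 0x8002fffe gives 0xfffe + 0x8002 = 0x18000, and a second round gives
 * 0x8000 + 0x1 = 0x8001.  Two rounds always suffice: the first sum is
 * at most 0xffff + 0xffff = 0x1fffe, so it carries at most one bit
 * back into the low half.
 */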
static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32 *)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}
/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */
struct super_type {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(struct md_rdev *rdev,
					  struct md_rdev *refdev,
					  int minor_version);
	int		    (*validate_super)(struct mddev *mddev,
					      struct md_rdev *rdev);
	void		    (*sync_super)(struct mddev *mddev,
					  struct md_rdev *rdev);
	unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
						sector_t num_sectors);
	int		    (*allow_new_offset)(struct md_rdev *rdev,
						unsigned long long new_offset);
};
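/*
 * Editorial sketch (hypothetical, not part of the driver): a new
 * metadata format would be added by implementing the five methods
 * above and appending an entry to super_types[] further down, e.g.
 *
 *	[2] = {
 *		.name			= "example-2",
 *		.owner			= THIS_MODULE,
 *		.load_super		= super_2_load,
 *		.validate_super		= super_2_validate,
 *		.sync_super		= super_2_sync,
 *		.rdev_size_change	= super_2_rdev_size_change,
 *		.allow_new_offset	= super_2_allow_new_offset,
 *	},
 *
 * where the array index must match the major_version the format
 * reports, since sync_super() below indexes by mddev->major_version.
 */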
/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(struct mddev *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
	       mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
/*
 * load_super for 0.90.0
 */
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512-byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret)
		return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
		       sb->major_version, sb->minor_version,
		       b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
		       b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->new_data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;
	rdev->badblocks.shift = -1;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}

	rdev->sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that */
	if (rdev->sectors >= (2ULL << 32))
		rdev->sectors = (2ULL << 32) - 2;

	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

abort:
	return ret;
}
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = ((sector_t)sb->size) * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* bitmap can use 60 K after the 4K superblocks */
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
		mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
		mddev->reshape_backwards = 0;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
			if (mddev->delta_disks < 0)
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12, &sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
			mddev->bitmap_info.space =
				mddev->bitmap_info.default_space;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_super_t *sb;
	struct md_rdev *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active = 0, working = 0, failed = 0, spare = 0, nr_disks = 0;

	rdev->sb_size = MD_SB_BYTES;

	sb = page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12, 4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync) {
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	rdev_for_each(rdev2, mddev) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}
/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * 4TB == 2^32 KB, or 2*2^32 sectors.
	 */
	if (num_sectors >= (2ULL << 32))
		num_sectors = (2ULL << 32) - 2;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}

static int
super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
{
	/* non-zero offset changes not possible with v0.90 */
	return new_offset == 0;
}
/*
 * version 1 superblock
 */
static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32 *)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i = 0; size >= 4; size -= 4)
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16 *)isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
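/*
 * Worked example (editorial, not from the original source): the v1
 * superblock is 256 bytes of fixed fields plus a 2-byte role entry per
 * possible device, so with max_dev == 384 the checksummed region is
 * 256 + 384*2 = 1024 bytes, i.e. 256 little-endian 32-bit words and no
 * trailing 2-byte remainder.
 */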
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged);
static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	sector_t sectors;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512-byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret)
		return ret;

	sb = page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
		       bdevname(rdev->bdev, b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev, b));
		return -EINVAL;
	}
	if (sb->pad0 ||
	    sb->pad3[0] ||
	    memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
		/* Some padding is non-zero, might be a new feature */
		return -EINVAL;

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	rdev->new_data_offset = rdev->data_offset;
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;
	if (minor_version
	    && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!rdev->bb_page) {
		rdev->bb_page = alloc_page(GFP_KERNEL);
		if (!rdev->bb_page)
			return -ENOMEM;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
	    rdev->badblocks.count == 0) {
		/* need to load the bad block list.
		 * Currently we limit it to one page.
		 */
		s32 offset;
		sector_t bb_sector;
		u64 *bbp;
		int i;
		int sectors = le16_to_cpu(sb->bblog_size);
		if (sectors > (PAGE_SIZE / 512))
			return -EINVAL;
		offset = le32_to_cpu(sb->bblog_offset);
		if (offset == 0)
			return -EINVAL;
		bb_sector = (long long)offset;
		if (!sync_page_io(rdev, bb_sector, sectors << 9,
				  rdev->bb_page, READ, true))
			return -EIO;
		bbp = (u64 *)page_address(rdev->bb_page);
		rdev->badblocks.shift = sb->bblog_shift;
		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
			u64 bb = le64_to_cpu(*bbp);
			int count = bb & (0x3ff);
			u64 sector = bb >> 10;
			sector <<= sb->bblog_shift;
			count <<= sb->bblog_shift;
			if (bb + 1 == 0)
				break;
			if (md_set_badblocks(&rdev->badblocks,
					     sector, count, 1) == 0)
				return -EINVAL;
		}
	} else if (sb->bblog_offset == 0)
		rdev->badblocks.shift = -1;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
			       " superblock to %s\n",
			       bdevname(rdev->bdev, b),
			       bdevname(refdev->bdev, b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version) {
		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
		sectors -= rdev->data_offset;
	} else
		sectors = rdev->sb_start;
	if (sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	return ret;
}
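/*
 * Worked example (editorial, not from the original source): for
 * minor_version 0 on a 1000003-sector device, sb_start becomes
 * 1000003 - 16 = 999987, and rounding down to an 8-sector (4K)
 * boundary gives 999984, i.e. the superblock sits 19 sectors (9.5K)
 * from the end -- always at least 8K but less than 12K, as the comment
 * above states.
 */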
static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* Default location for bitmap is 1K after superblock
		 * using 3K - total of 4K
		 */
		mddev->bitmap_info.default_offset = 1024 >> 9;
		mddev->bitmap_info.default_space = (4096-1024) >> 9;
		mddev->reshape_backwards = 0;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks = (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);
			/* Metadata doesn't record how much space is available.
			 * For 1.0, we assume we can use up to the superblock
			 * if before, else to 4K beyond superblock.
			 * For others, assume no change is possible.
			 */
			if (mddev->minor_version > 0)
				mddev->bitmap_info.space = 0;
			else if (mddev->bitmap_info.offset > 0)
				mddev->bitmap_info.space =
					8 - mddev->bitmap_info.offset;
			else
				mddev->bitmap_info.space =
					-mddev->bitmap_info.offset;
		}

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
			if (mddev->delta_disks < 0 ||
			    (mddev->delta_disks == 0 &&
			     (le32_to_cpu(sb->feature_map)
			      & MD_FEATURE_RESHAPE_BACKWARDS)))
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
			set_bit(Replacement, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb;
	struct md_rdev *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (test_bit(WriteMostly, &rdev->flags))
		sb->devflags |= WriteMostly1;
	else
		sb->devflags &= ~WriteMostly1;
	sb->data_offset = cpu_to_le64(rdev->data_offset);
	sb->data_size = cpu_to_le64(rdev->sectors);

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
	}
	if (test_bit(Replacement, &rdev->flags))
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_REPLACEMENT);

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
		if (mddev->delta_disks == 0 &&
		    mddev->reshape_backwards)
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
		if (rdev->new_data_offset != rdev->data_offset) {
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
			sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
							     - rdev->data_offset));
		}
	}

	if (rdev->badblocks.count == 0)
		/* Nothing to do for bad blocks*/ ;
	else if (sb->bblog_offset == 0)
		/* Cannot record bad blocks on this device */
		md_error(mddev, rdev);
	else {
		struct badblocks *bb = &rdev->badblocks;
		u64 *bbp = (u64 *)page_address(rdev->bb_page);
		u64 *p = bb->page;
		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
		if (bb->changed) {
			unsigned seq;

retry:
			seq = read_seqbegin(&bb->lock);

			memset(bbp, 0xff, PAGE_SIZE);

			for (i = 0 ; i < bb->count ; i++) {
				u64 internal_bb = *p++;
				u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
						| BB_LEN(internal_bb));
				*bbp++ = cpu_to_le64(store_bb);
			}
			bb->changed = 0;
			if (read_seqretry(&bb->lock, seq))
				goto retry;

			bb->sector = (rdev->sb_start +
				      (int)le32_to_cpu(sb->bblog_offset));
			bb->size = le16_to_cpu(sb->bblog_size);
		}
	}

	max_dev = 0;
	rdev_for_each(rdev2, mddev)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	} else
		max_dev = le32_to_cpu(sb->max_dev);

	for (i = 0; i < max_dev; i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	rdev_for_each(rdev2, mddev) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
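/*
 * Illustrative dev_roles[] layout (editorial, not from the original
 * source): a 3-disk RAID5 whose desc_nr-1 member has failed, plus one
 * spare at desc_nr 3, would be recorded as
 *   dev_roles[0] = 0       (in sync, raid slot 0)
 *   dev_roles[1] = 0xfffe  (faulty)
 *   dev_roles[2] = 2       (in sync, raid slot 2)
 *   dev_roles[3] = 0xffff  (spare)
 */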
static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->data_offset != rdev->new_data_offset)
		return 0; /* too confusing */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = cpu_to_le64(rdev->sb_start);
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}
static int
super_1_allow_new_offset(struct md_rdev *rdev,
			 unsigned long long new_offset)
{
	/* All necessary checks on new >= old have been done */
	struct bitmap *bitmap;
	if (new_offset >= rdev->data_offset)
		return 1;

	/* with 1.0 metadata, there is no metadata to tread on
	 * so we can always move back */
	if (rdev->mddev->minor_version == 0)
		return 1;

	/* otherwise we must be sure not to step on
	 * any metadata, so stay:
	 * 36K beyond start of superblock
	 * beyond end of badblocks
	 * beyond write-intent bitmap
	 */
	if (rdev->sb_start + (32+4)*2 > new_offset)
		return 0;
	bitmap = rdev->mddev->bitmap;
	if (bitmap && !rdev->mddev->bitmap_info.file &&
	    rdev->sb_start + rdev->mddev->bitmap_info.offset +
	    bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
		return 0;
	if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
		return 0;

	return 1;
}
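/*
 * Worked example (editorial, not from the original source): the first
 * check reserves (32+4)*2 = 72 sectors, i.e. 36K, past sb_start.  With
 * 1.1 metadata (sb_start == 0), any new_offset below sector 72 is
 * therefore refused before the bitmap and badblocks checks are even
 * consulted.
 */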
static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
		.allow_new_offset   = super_90_allow_new_offset,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
		.allow_new_offset   = super_1_allow_new_offset,
	},
};
static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
{
	if (mddev->sync_super) {
		mddev->sync_super(mddev, rdev);
		return;
	}

	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));

	super_types[mddev->major_version].sync_super(mddev, rdev);
}

static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
{
	struct md_rdev *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}

	rcu_read_unlock();
	return 0;
}
static LIST_HEAD(pending_raid_disks);

/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(struct mddev *mddev)
{
	struct md_rdev *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
		return 0; /* shouldn't register, or already is */
	rdev_for_each(rdev, mddev) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
					  rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
	if (!reference || !bdev_get_integrity(reference->bdev))
		return 0;
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	if (blk_integrity_register(mddev->gendisk,
				   bdev_get_integrity(reference->bdev)) != 0) {
		printk(KERN_ERR "md: failed to register integrity for %s\n",
		       mdname(mddev));
		return -EINVAL;
	}
	printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
	if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
		printk(KERN_ERR "md: failed to create integrity pool for %s\n",
		       mdname(mddev));
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(md_integrity_register);
/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
	struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);

	if (!bi_mddev) /* nothing to do */
		return;
	if (rdev->raid_disk < 0) /* skip spares */
		return;
	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
					     rdev->bdev->bd_disk) >= 0)
		return;
	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
	blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers)
			choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev, b);
	while ((s = strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if (sysfs_create_link(&rdev->kobj, ko, "block"))
		/* failure here is OK */;
	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_link_disk_holder(rdev->bdev, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled++;

	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}
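/*
 * Editorial note (not from the original source): because desc_nr
 * assignment starts scanning at mddev->raid_disks on a running array,
 * a device hot-added to a 4-disk array gets desc_nr 4 (or higher),
 * keeping the slots below raid_disks reserved for active members.
 */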
static void md_delayed_delete(struct work_struct *ws)
{
	struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev, b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	rdev->badblocks.count = 0;
	/* We need to delay this, otherwise we can deadlock when
	 * writing 'remove' to "dev/state".  We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	queue_work(md_misc_wq, &rdev->del_work);
}
/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
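/*
 * Editorial note (not from the original source): for a "shared" claim
 * the holder passed to blkdev_get_by_dev() below is the address of
 * lock_rdev() itself rather than the rdev.  Exclusive claims with the
 * same holder pointer may coexist, so every shared claimant using this
 * common cookie can open the device while still excluding everyone
 * else.
 */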
static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				 shared ? (struct md_rdev *)lock_rdev : rdev);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
		       __bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	rdev->bdev = bdev;
	return err;
}
static void unlock_rdev(struct md_rdev *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
	       bdevname(rdev->bdev, b));
	if (rdev->mddev)
		MD_BUG();
	md_rdev_clear(rdev);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(struct md_rdev *rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}
static void export_array(struct mddev *mddev)
{
	struct md_rdev *rdev, *tmp;

	rdev_for_each_safe(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
	       desc->major, desc->minor, desc->raid_disk, desc->state);
}

static void print_sb_90(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
	       "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
	       sb->major_version, sb->minor_version, sb->patch_version,
	       sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
	       sb->ctime);
	printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
	       sb->level, sb->size, sb->nr_disks, sb->raid_disks,
	       sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
	       " FD:%d SD:%d CSUM:%08x E:%08lx\n",
	       sb->utime, sb->state, sb->active_disks, sb->working_disks,
	       sb->failed_disks, sb->spare_disks,
	       sb->sb_csum, (unsigned long)sb->events_lo);
	printk(KERN_INFO);
	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk(" D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md: THIS: ");
	print_desc(&sb->this_disk);
}
static void print_sb_1(struct mdp_superblock_1 *sb)
{
	__u8 *uuid;

	uuid = sb->set_uuid;
	printk(KERN_INFO
	       "md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
	       "md: Name: \"%s\" CT:%llu\n",
	       le32_to_cpu(sb->major_version),
	       le32_to_cpu(sb->feature_map),
	       uuid,
	       sb->set_name,
	       (unsigned long long)le64_to_cpu(sb->ctime)
	       & MD_SUPERBLOCK_1_TIME_SEC_MASK);

	uuid = sb->device_uuid;
	printk(KERN_INFO
	       "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
	       " RO:%llu\n"
	       "md: Dev:%08x UUID: %pU\n"
	       "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
	       "md: (MaxDev:%u) \n",
	       le32_to_cpu(sb->level),
	       (unsigned long long)le64_to_cpu(sb->size),
	       le32_to_cpu(sb->raid_disks),
	       le32_to_cpu(sb->layout),
	       le32_to_cpu(sb->chunksize),
	       (unsigned long long)le64_to_cpu(sb->data_offset),
	       (unsigned long long)le64_to_cpu(sb->data_size),
	       (unsigned long long)le64_to_cpu(sb->super_offset),
	       (unsigned long long)le64_to_cpu(sb->recovery_offset),
	       le32_to_cpu(sb->dev_number),
	       uuid,
	       sb->devflags,
	       (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
	       (unsigned long long)le64_to_cpu(sb->events),
	       (unsigned long long)le64_to_cpu(sb->resync_offset),
	       le32_to_cpu(sb->sb_csum),
	       le32_to_cpu(sb->max_dev)
		);
}
static void print_rdev(struct md_rdev *rdev, int major_version)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
	       bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
	       test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
	       rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
		switch (major_version) {
		case 0:
			print_sb_90(page_address(rdev->sb_page));
			break;
		case 1:
			print_sb_1(page_address(rdev->sb_page));
			break;
		}
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}
static void md_print_devices(void)
{
	struct list_head *tmp;
	struct md_rdev *rdev;
	struct mddev *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md: **********************************\n");
	printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md: **********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		rdev_for_each(rdev, mddev)
			printk("<%s>", bdevname(rdev->bdev, b));
		printk("\n");

		rdev_for_each(rdev, mddev)
			print_rdev(rdev, mddev->major_version);
	}
	printk("md: **********************************\n");
	printk("\n");
}
static void sync_sbs(struct mddev *mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	struct md_rdev *rdev;
	rdev_for_each(rdev, mddev) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
  2172. static void md_update_sb(struct mddev * mddev, int force_change)
  2173. {
  2174. struct md_rdev *rdev;
  2175. int sync_req;
  2176. int nospares = 0;
  2177. int any_badblocks_changed = 0;
  2178. repeat:
  2179. /* First make sure individual recovery_offsets are correct */
  2180. rdev_for_each(rdev, mddev) {
  2181. if (rdev->raid_disk >= 0 &&
  2182. mddev->delta_disks >= 0 &&
  2183. !test_bit(In_sync, &rdev->flags) &&
  2184. mddev->curr_resync_completed > rdev->recovery_offset)
  2185. rdev->recovery_offset = mddev->curr_resync_completed;
  2186. }
  2187. if (!mddev->persistent) {
  2188. clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
  2189. clear_bit(MD_CHANGE_DEVS, &mddev->flags);
  2190. if (!mddev->external) {
  2191. clear_bit(MD_CHANGE_PENDING, &mddev->flags);
  2192. rdev_for_each(rdev, mddev) {
  2193. if (rdev->badblocks.changed) {
  2194. rdev->badblocks.changed = 0;
  2195. md_ack_all_badblocks(&rdev->badblocks);
  2196. md_error(mddev, rdev);
  2197. }
  2198. clear_bit(Blocked, &rdev->flags);
  2199. clear_bit(BlockedBadBlocks, &rdev->flags);
  2200. wake_up(&rdev->blocked_wait);
  2201. }
  2202. }
  2203. wake_up(&mddev->sb_wait);
  2204. return;
  2205. }
  2206. spin_lock_irq(&mddev->write_lock);
  2207. mddev->utime = get_seconds();
  2208. if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
  2209. force_change = 1;
  2210. if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2211. /* just a clean <-> dirty transition; possibly leave spares alone,
2212. * though if 'events' ends up without the right even/odd parity we
2213. * will have to update the spares after all
2214. */
  2215. nospares = 1;
  2216. if (force_change)
  2217. nospares = 0;
  2218. if (mddev->degraded)
  2219. /* If the array is degraded, then skipping spares is both
  2220. * dangerous and fairly pointless.
  2221. * Dangerous because a device that was removed from the array
2222. * might have an event_count that still looks up-to-date,
  2223. * so it can be re-added without a resync.
  2224. * Pointless because if there are any spares to skip,
  2225. * then a recovery will happen and soon that array won't
  2226. * be degraded any more and the spare can go back to sleep then.
  2227. */
  2228. nospares = 0;
  2229. sync_req = mddev->in_sync;
  2230. /* If this is just a dirty<->clean transition, and the array is clean
  2231. * and 'events' is odd, we can roll back to the previous clean state */
  2232. if (nospares
  2233. && (mddev->in_sync && mddev->recovery_cp == MaxSector)
  2234. && mddev->can_decrease_events
  2235. && mddev->events != 1) {
  2236. mddev->events--;
  2237. mddev->can_decrease_events = 0;
  2238. } else {
  2239. /* otherwise we have to go forward and ... */
  2240. mddev->events ++;
  2241. mddev->can_decrease_events = nospares;
  2242. }
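/*
 * Concrete sketch of the even/odd convention above: a clean array at
 * events==42 that goes dirty bumps to 43 (with can_decrease_events set);
 * when it goes clean again with nothing else changed, events rolls back
 * to 42, so spares still recording 42 never need a superblock rewrite
 * for a pure clean<->dirty bounce (see sync_sbs above).
 */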
  2243. if (!mddev->events) {
  2244. /*
  2245. * oops, this 64-bit counter should never wrap.
  2246. * Either we are in around ~1 trillion A.C., assuming
  2247. * 1 reboot per second, or we have a bug:
  2248. */
  2249. MD_BUG();
  2250. mddev->events --;
  2251. }
  2252. rdev_for_each(rdev, mddev) {
  2253. if (rdev->badblocks.changed)
  2254. any_badblocks_changed++;
  2255. if (test_bit(Faulty, &rdev->flags))
  2256. set_bit(FaultRecorded, &rdev->flags);
  2257. }
  2258. sync_sbs(mddev, nospares);
  2259. spin_unlock_irq(&mddev->write_lock);
  2260. pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
  2261. mdname(mddev), mddev->in_sync);
  2262. bitmap_update_sb(mddev->bitmap);
  2263. rdev_for_each(rdev, mddev) {
  2264. char b[BDEVNAME_SIZE];
  2265. if (rdev->sb_loaded != 1)
  2266. continue; /* no noise on spare devices */
  2267. if (!test_bit(Faulty, &rdev->flags) &&
  2268. rdev->saved_raid_disk == -1) {
  2269. md_super_write(mddev,rdev,
  2270. rdev->sb_start, rdev->sb_size,
  2271. rdev->sb_page);
  2272. pr_debug("md: (write) %s's sb offset: %llu\n",
  2273. bdevname(rdev->bdev, b),
  2274. (unsigned long long)rdev->sb_start);
  2275. rdev->sb_events = mddev->events;
  2276. if (rdev->badblocks.size) {
  2277. md_super_write(mddev, rdev,
  2278. rdev->badblocks.sector,
  2279. rdev->badblocks.size << 9,
  2280. rdev->bb_page);
  2281. rdev->badblocks.size = 0;
  2282. }
  2283. } else if (test_bit(Faulty, &rdev->flags))
  2284. pr_debug("md: %s (skipping faulty)\n",
  2285. bdevname(rdev->bdev, b));
  2286. else
  2287. pr_debug("(skipping incremental s/r ");
  2288. if (mddev->level == LEVEL_MULTIPATH)
  2289. /* only need to write one superblock... */
  2290. break;
  2291. }
  2292. md_super_wait(mddev);
  2293. /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
  2294. spin_lock_irq(&mddev->write_lock);
  2295. if (mddev->in_sync != sync_req ||
  2296. test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
  2297. /* have to write it out again */
  2298. spin_unlock_irq(&mddev->write_lock);
  2299. goto repeat;
  2300. }
  2301. clear_bit(MD_CHANGE_PENDING, &mddev->flags);
  2302. spin_unlock_irq(&mddev->write_lock);
  2303. wake_up(&mddev->sb_wait);
  2304. if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
  2305. sysfs_notify(&mddev->kobj, NULL, "sync_completed");
  2306. rdev_for_each(rdev, mddev) {
  2307. if (test_and_clear_bit(FaultRecorded, &rdev->flags))
  2308. clear_bit(Blocked, &rdev->flags);
  2309. if (any_badblocks_changed)
  2310. md_ack_all_badblocks(&rdev->badblocks);
  2311. clear_bit(BlockedBadBlocks, &rdev->flags);
  2312. wake_up(&rdev->blocked_wait);
  2313. }
  2314. }
  2315. /* words written to sysfs files may, or may not, be \n terminated.
2316. * We want to accept either case. For this we use cmd_match.
  2317. */
  2318. static int cmd_match(const char *cmd, const char *str)
  2319. {
  2320. /* See if cmd, written into a sysfs file, matches
  2321. * str. They must either be the same, or cmd can
  2322. * have a trailing newline
  2323. */
  2324. while (*cmd && *str && *cmd == *str) {
  2325. cmd++;
  2326. str++;
  2327. }
  2328. if (*cmd == '\n')
  2329. cmd++;
  2330. if (*str || *cmd)
  2331. return 0;
  2332. return 1;
  2333. }
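/*
 * For example, given the loop above:
 *	cmd_match("idle\n", "idle") == 1   (trailing newline accepted)
 *	cmd_match("idle",   "idle") == 1   (exact match)
 *	cmd_match("idle2",  "idle") == 0   (trailing garbage rejected)
 *	cmd_match("idl",    "idle") == 0   (mere prefix rejected)
 */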
  2334. struct rdev_sysfs_entry {
  2335. struct attribute attr;
  2336. ssize_t (*show)(struct md_rdev *, char *);
  2337. ssize_t (*store)(struct md_rdev *, const char *, size_t);
  2338. };
  2339. static ssize_t
  2340. state_show(struct md_rdev *rdev, char *page)
  2341. {
  2342. char *sep = "";
  2343. size_t len = 0;
  2344. if (test_bit(Faulty, &rdev->flags) ||
  2345. rdev->badblocks.unacked_exist) {
  2346. len+= sprintf(page+len, "%sfaulty",sep);
  2347. sep = ",";
  2348. }
  2349. if (test_bit(In_sync, &rdev->flags)) {
  2350. len += sprintf(page+len, "%sin_sync",sep);
  2351. sep = ",";
  2352. }
  2353. if (test_bit(WriteMostly, &rdev->flags)) {
  2354. len += sprintf(page+len, "%swrite_mostly",sep);
  2355. sep = ",";
  2356. }
  2357. if (test_bit(Blocked, &rdev->flags) ||
  2358. (rdev->badblocks.unacked_exist
  2359. && !test_bit(Faulty, &rdev->flags))) {
  2360. len += sprintf(page+len, "%sblocked", sep);
  2361. sep = ",";
  2362. }
  2363. if (!test_bit(Faulty, &rdev->flags) &&
  2364. !test_bit(In_sync, &rdev->flags)) {
  2365. len += sprintf(page+len, "%sspare", sep);
  2366. sep = ",";
  2367. }
  2368. if (test_bit(WriteErrorSeen, &rdev->flags)) {
  2369. len += sprintf(page+len, "%swrite_error", sep);
  2370. sep = ",";
  2371. }
  2372. if (test_bit(WantReplacement, &rdev->flags)) {
  2373. len += sprintf(page+len, "%swant_replacement", sep);
  2374. sep = ",";
  2375. }
  2376. if (test_bit(Replacement, &rdev->flags)) {
  2377. len += sprintf(page+len, "%sreplacement", sep);
  2378. sep = ",";
  2379. }
  2380. return len+sprintf(page+len, "\n");
  2381. }
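/*
 * Sketch of the resulting output: flags print comma-separated in the
 * order tested above, so a healthy write-mostly spare reads back as
 * "write_mostly,spare\n", while a device that has simply failed reads
 * back as "faulty\n".
 */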
  2382. static ssize_t
  2383. state_store(struct md_rdev *rdev, const char *buf, size_t len)
  2384. {
  2385. /* can write
  2386. * faulty - simulates an error
  2387. * remove - disconnects the device
  2388. * writemostly - sets write_mostly
  2389. * -writemostly - clears write_mostly
  2390. * blocked - sets the Blocked flags
  2391. * -blocked - clears the Blocked and possibly simulates an error
2392. * insync - sets In_sync, provided the device isn't active
  2393. * write_error - sets WriteErrorSeen
  2394. * -write_error - clears WriteErrorSeen
  2395. */
  2396. int err = -EINVAL;
  2397. if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
  2398. md_error(rdev->mddev, rdev);
  2399. if (test_bit(Faulty, &rdev->flags))
  2400. err = 0;
  2401. else
  2402. err = -EBUSY;
  2403. } else if (cmd_match(buf, "remove")) {
  2404. if (rdev->raid_disk >= 0)
  2405. err = -EBUSY;
  2406. else {
  2407. struct mddev *mddev = rdev->mddev;
  2408. kick_rdev_from_array(rdev);
  2409. if (mddev->pers)
  2410. md_update_sb(mddev, 1);
  2411. md_new_event(mddev);
  2412. err = 0;
  2413. }
  2414. } else if (cmd_match(buf, "writemostly")) {
  2415. set_bit(WriteMostly, &rdev->flags);
  2416. err = 0;
  2417. } else if (cmd_match(buf, "-writemostly")) {
  2418. clear_bit(WriteMostly, &rdev->flags);
  2419. err = 0;
  2420. } else if (cmd_match(buf, "blocked")) {
  2421. set_bit(Blocked, &rdev->flags);
  2422. err = 0;
  2423. } else if (cmd_match(buf, "-blocked")) {
  2424. if (!test_bit(Faulty, &rdev->flags) &&
  2425. rdev->badblocks.unacked_exist) {
  2426. /* metadata handler doesn't understand badblocks,
  2427. * so we need to fail the device
  2428. */
  2429. md_error(rdev->mddev, rdev);
  2430. }
  2431. clear_bit(Blocked, &rdev->flags);
  2432. clear_bit(BlockedBadBlocks, &rdev->flags);
  2433. wake_up(&rdev->blocked_wait);
  2434. set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
  2435. md_wakeup_thread(rdev->mddev->thread);
  2436. err = 0;
  2437. } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
  2438. set_bit(In_sync, &rdev->flags);
  2439. err = 0;
  2440. } else if (cmd_match(buf, "write_error")) {
  2441. set_bit(WriteErrorSeen, &rdev->flags);
  2442. err = 0;
  2443. } else if (cmd_match(buf, "-write_error")) {
  2444. clear_bit(WriteErrorSeen, &rdev->flags);
  2445. err = 0;
  2446. } else if (cmd_match(buf, "want_replacement")) {
  2447. /* Any non-spare device that is not a replacement can
  2448. * become want_replacement at any time, but we then need to
  2449. * check if recovery is needed.
  2450. */
  2451. if (rdev->raid_disk >= 0 &&
  2452. !test_bit(Replacement, &rdev->flags))
  2453. set_bit(WantReplacement, &rdev->flags);
  2454. set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
  2455. md_wakeup_thread(rdev->mddev->thread);
  2456. err = 0;
  2457. } else if (cmd_match(buf, "-want_replacement")) {
  2458. /* Clearing 'want_replacement' is always allowed.
2459. * Once replacement starts it is too late, though.
  2460. */
  2461. err = 0;
  2462. clear_bit(WantReplacement, &rdev->flags);
  2463. } else if (cmd_match(buf, "replacement")) {
  2464. /* Can only set a device as a replacement when array has not
  2465. * yet been started. Once running, replacement is automatic
  2466. * from spares, or by assigning 'slot'.
  2467. */
  2468. if (rdev->mddev->pers)
  2469. err = -EBUSY;
  2470. else {
  2471. set_bit(Replacement, &rdev->flags);
  2472. err = 0;
  2473. }
  2474. } else if (cmd_match(buf, "-replacement")) {
  2475. /* Similarly, can only clear Replacement before start */
  2476. if (rdev->mddev->pers)
  2477. err = -EBUSY;
  2478. else {
  2479. clear_bit(Replacement, &rdev->flags);
  2480. err = 0;
  2481. }
  2482. }
  2483. if (!err)
  2484. sysfs_notify_dirent_safe(rdev->sysfs_state);
  2485. return err ? err : len;
  2486. }
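/*
 * Illustrative use from userspace (the sysfs path is an assumption and
 * depends on the array and member device):
 *	echo faulty > /sys/block/md0/md/dev-sda1/state
 * simulates a failure; "remove" then succeeds only once the device no
 * longer occupies a slot in the array (rdev->raid_disk < 0).
 */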
  2487. static struct rdev_sysfs_entry rdev_state =
  2488. __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
  2489. static ssize_t
  2490. errors_show(struct md_rdev *rdev, char *page)
  2491. {
  2492. return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
  2493. }
  2494. static ssize_t
  2495. errors_store(struct md_rdev *rdev, const char *buf, size_t len)
  2496. {
  2497. char *e;
  2498. unsigned long n = simple_strtoul(buf, &e, 10);
  2499. if (*buf && (*e == 0 || *e == '\n')) {
  2500. atomic_set(&rdev->corrected_errors, n);
  2501. return len;
  2502. }
  2503. return -EINVAL;
  2504. }
  2505. static struct rdev_sysfs_entry rdev_errors =
  2506. __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
  2507. static ssize_t
  2508. slot_show(struct md_rdev *rdev, char *page)
  2509. {
  2510. if (rdev->raid_disk < 0)
  2511. return sprintf(page, "none\n");
  2512. else
  2513. return sprintf(page, "%d\n", rdev->raid_disk);
  2514. }
  2515. static ssize_t
  2516. slot_store(struct md_rdev *rdev, const char *buf, size_t len)
  2517. {
  2518. char *e;
  2519. int err;
  2520. int slot = simple_strtoul(buf, &e, 10);
  2521. if (strncmp(buf, "none", 4)==0)
  2522. slot = -1;
  2523. else if (e==buf || (*e && *e!= '\n'))
  2524. return -EINVAL;
  2525. if (rdev->mddev->pers && slot == -1) {
  2526. /* Setting 'slot' on an active array requires also
  2527. * updating the 'rd%d' link, and communicating
  2528. * with the personality with ->hot_*_disk.
  2529. * For now we only support removing
  2530. * failed/spare devices. This normally happens automatically,
  2531. * but not when the metadata is externally managed.
  2532. */
  2533. if (rdev->raid_disk == -1)
  2534. return -EEXIST;
  2535. /* personality does all needed checks */
  2536. if (rdev->mddev->pers->hot_remove_disk == NULL)
  2537. return -EINVAL;
  2538. err = rdev->mddev->pers->
  2539. hot_remove_disk(rdev->mddev, rdev);
  2540. if (err)
  2541. return err;
  2542. sysfs_unlink_rdev(rdev->mddev, rdev);
  2543. rdev->raid_disk = -1;
  2544. set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
  2545. md_wakeup_thread(rdev->mddev->thread);
  2546. } else if (rdev->mddev->pers) {
  2547. /* Activating a spare .. or possibly reactivating
  2548. * if we ever get bitmaps working here.
  2549. */
  2550. if (rdev->raid_disk != -1)
  2551. return -EBUSY;
  2552. if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
  2553. return -EBUSY;
  2554. if (rdev->mddev->pers->hot_add_disk == NULL)
  2555. return -EINVAL;
  2556. if (slot >= rdev->mddev->raid_disks &&
  2557. slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
  2558. return -ENOSPC;
  2559. rdev->raid_disk = slot;
  2560. if (test_bit(In_sync, &rdev->flags))
  2561. rdev->saved_raid_disk = slot;
  2562. else
  2563. rdev->saved_raid_disk = -1;
  2564. clear_bit(In_sync, &rdev->flags);
  2565. err = rdev->mddev->pers->
  2566. hot_add_disk(rdev->mddev, rdev);
  2567. if (err) {
  2568. rdev->raid_disk = -1;
  2569. return err;
  2570. } else
  2571. sysfs_notify_dirent_safe(rdev->sysfs_state);
  2572. if (sysfs_link_rdev(rdev->mddev, rdev))
  2573. /* failure here is OK */;
  2574. /* don't wakeup anyone, leave that to userspace. */
  2575. } else {
  2576. if (slot >= rdev->mddev->raid_disks &&
  2577. slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
  2578. return -ENOSPC;
  2579. rdev->raid_disk = slot;
  2580. /* assume it is working */
  2581. clear_bit(Faulty, &rdev->flags);
  2582. clear_bit(WriteMostly, &rdev->flags);
  2583. set_bit(In_sync, &rdev->flags);
  2584. sysfs_notify_dirent_safe(rdev->sysfs_state);
  2585. }
  2586. return len;
  2587. }
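/*
 * For example (paths illustrative): on an inactive array
 *	echo 3 > /sys/block/md0/md/dev-sdb/slot
 * simply claims slot 3 and marks the device In_sync, whereas on a
 * running array the same write goes through ->hot_add_disk(), and
 * writing "none" hot-removes a failed/spare device via
 * ->hot_remove_disk().
 */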
  2588. static struct rdev_sysfs_entry rdev_slot =
  2589. __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
  2590. static ssize_t
  2591. offset_show(struct md_rdev *rdev, char *page)
  2592. {
  2593. return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
  2594. }
  2595. static ssize_t
  2596. offset_store(struct md_rdev *rdev, const char *buf, size_t len)
  2597. {
  2598. unsigned long long offset;
  2599. if (strict_strtoull(buf, 10, &offset) < 0)
  2600. return -EINVAL;
  2601. if (rdev->mddev->pers && rdev->raid_disk >= 0)
  2602. return -EBUSY;
  2603. if (rdev->sectors && rdev->mddev->external)
  2604. /* Must set offset before size, so overlap checks
  2605. * can be sane */
  2606. return -EBUSY;
  2607. rdev->data_offset = offset;
  2608. rdev->new_data_offset = offset;
  2609. return len;
  2610. }
  2611. static struct rdev_sysfs_entry rdev_offset =
  2612. __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
  2613. static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
  2614. {
  2615. return sprintf(page, "%llu\n",
  2616. (unsigned long long)rdev->new_data_offset);
  2617. }
  2618. static ssize_t new_offset_store(struct md_rdev *rdev,
  2619. const char *buf, size_t len)
  2620. {
  2621. unsigned long long new_offset;
  2622. struct mddev *mddev = rdev->mddev;
  2623. if (strict_strtoull(buf, 10, &new_offset) < 0)
  2624. return -EINVAL;
  2625. if (mddev->sync_thread)
  2626. return -EBUSY;
  2627. if (new_offset == rdev->data_offset)
  2628. /* reset is always permitted */
  2629. ;
  2630. else if (new_offset > rdev->data_offset) {
  2631. /* must not push array size beyond rdev_sectors */
  2632. if (new_offset - rdev->data_offset
  2633. + mddev->dev_sectors > rdev->sectors)
  2634. return -E2BIG;
  2635. }
  2636. /* Metadata worries about other space details. */
  2637. /* decreasing the offset is inconsistent with a backwards
  2638. * reshape.
  2639. */
  2640. if (new_offset < rdev->data_offset &&
  2641. mddev->reshape_backwards)
  2642. return -EINVAL;
  2643. /* Increasing offset is inconsistent with forwards
  2644. * reshape. reshape_direction should be set to
  2645. * 'backwards' first.
  2646. */
  2647. if (new_offset > rdev->data_offset &&
  2648. !mddev->reshape_backwards)
  2649. return -EINVAL;
  2650. if (mddev->pers && mddev->persistent &&
  2651. !super_types[mddev->major_version]
  2652. .allow_new_offset(rdev, new_offset))
  2653. return -E2BIG;
  2654. rdev->new_data_offset = new_offset;
  2655. if (new_offset > rdev->data_offset)
  2656. mddev->reshape_backwards = 1;
  2657. else if (new_offset < rdev->data_offset)
  2658. mddev->reshape_backwards = 0;
  2659. return len;
  2660. }
  2661. static struct rdev_sysfs_entry rdev_new_offset =
  2662. __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
  2663. static ssize_t
  2664. rdev_size_show(struct md_rdev *rdev, char *page)
  2665. {
  2666. return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
  2667. }
  2668. static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
  2669. {
  2670. /* check if two start/length pairs overlap */
  2671. if (s1+l1 <= s2)
  2672. return 0;
  2673. if (s2+l2 <= s1)
  2674. return 0;
  2675. return 1;
  2676. }
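/*
 * E.g. overlaps(0, 10, 5, 10) returns 1 (sectors 5-9 are shared),
 * while overlaps(0, 10, 10, 5) returns 0: the [start, start+len)
 * ranges merely touch, and touching is not overlapping.
 */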
  2677. static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
  2678. {
  2679. unsigned long long blocks;
  2680. sector_t new;
  2681. if (strict_strtoull(buf, 10, &blocks) < 0)
  2682. return -EINVAL;
  2683. if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
  2684. return -EINVAL; /* sector conversion overflow */
  2685. new = blocks * 2;
  2686. if (new != blocks * 2)
  2687. return -EINVAL; /* unsigned long long to sector_t overflow */
  2688. *sectors = new;
  2689. return 0;
  2690. }
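/*
 * So writing "1024" (1K blocks) yields 2048 sectors; a value with the
 * top bit set, or one whose doubling no longer fits in sector_t (on
 * configs where sector_t is 32 bits wide), is rejected with -EINVAL.
 */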
  2691. static ssize_t
  2692. rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
  2693. {
  2694. struct mddev *my_mddev = rdev->mddev;
  2695. sector_t oldsectors = rdev->sectors;
  2696. sector_t sectors;
  2697. if (strict_blocks_to_sectors(buf, &sectors) < 0)
  2698. return -EINVAL;
  2699. if (rdev->data_offset != rdev->new_data_offset)
  2700. return -EINVAL; /* too confusing */
  2701. if (my_mddev->pers && rdev->raid_disk >= 0) {
  2702. if (my_mddev->persistent) {
  2703. sectors = super_types[my_mddev->major_version].
  2704. rdev_size_change(rdev, sectors);
  2705. if (!sectors)
  2706. return -EBUSY;
  2707. } else if (!sectors)
  2708. sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
  2709. rdev->data_offset;
  2710. }
  2711. if (sectors < my_mddev->dev_sectors)
  2712. return -EINVAL; /* component must fit device */
  2713. rdev->sectors = sectors;
  2714. if (sectors > oldsectors && my_mddev->external) {
  2715. /* need to check that all other rdevs with the same ->bdev
  2716. * do not overlap. We need to unlock the mddev to avoid
  2717. * a deadlock. We have already changed rdev->sectors, and if
  2718. * we have to change it back, we will have the lock again.
  2719. */
  2720. struct mddev *mddev;
  2721. int overlap = 0;
  2722. struct list_head *tmp;
  2723. mddev_unlock(my_mddev);
  2724. for_each_mddev(mddev, tmp) {
  2725. struct md_rdev *rdev2;
  2726. mddev_lock(mddev);
  2727. rdev_for_each(rdev2, mddev)
  2728. if (rdev->bdev == rdev2->bdev &&
  2729. rdev != rdev2 &&
  2730. overlaps(rdev->data_offset, rdev->sectors,
  2731. rdev2->data_offset,
  2732. rdev2->sectors)) {
  2733. overlap = 1;
  2734. break;
  2735. }
  2736. mddev_unlock(mddev);
  2737. if (overlap) {
  2738. mddev_put(mddev);
  2739. break;
  2740. }
  2741. }
  2742. mddev_lock(my_mddev);
  2743. if (overlap) {
  2744. /* Someone else could have slipped in a size
  2745. * change here, but doing so is just silly.
  2746. * We put oldsectors back because we *know* it is
  2747. * safe, and trust userspace not to race with
  2748. * itself
  2749. */
  2750. rdev->sectors = oldsectors;
  2751. return -EBUSY;
  2752. }
  2753. }
  2754. return len;
  2755. }
  2756. static struct rdev_sysfs_entry rdev_size =
  2757. __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
  2758. static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
  2759. {
  2760. unsigned long long recovery_start = rdev->recovery_offset;
  2761. if (test_bit(In_sync, &rdev->flags) ||
  2762. recovery_start == MaxSector)
  2763. return sprintf(page, "none\n");
  2764. return sprintf(page, "%llu\n", recovery_start);
  2765. }
  2766. static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
  2767. {
  2768. unsigned long long recovery_start;
  2769. if (cmd_match(buf, "none"))
  2770. recovery_start = MaxSector;
  2771. else if (strict_strtoull(buf, 10, &recovery_start))
  2772. return -EINVAL;
  2773. if (rdev->mddev->pers &&
  2774. rdev->raid_disk >= 0)
  2775. return -EBUSY;
  2776. rdev->recovery_offset = recovery_start;
  2777. if (recovery_start == MaxSector)
  2778. set_bit(In_sync, &rdev->flags);
  2779. else
  2780. clear_bit(In_sync, &rdev->flags);
  2781. return len;
  2782. }
  2783. static struct rdev_sysfs_entry rdev_recovery_start =
  2784. __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
  2785. static ssize_t
  2786. badblocks_show(struct badblocks *bb, char *page, int unack);
  2787. static ssize_t
  2788. badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);
  2789. static ssize_t bb_show(struct md_rdev *rdev, char *page)
  2790. {
  2791. return badblocks_show(&rdev->badblocks, page, 0);
  2792. }
  2793. static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
  2794. {
  2795. int rv = badblocks_store(&rdev->badblocks, page, len, 0);
  2796. /* Maybe that ack was all we needed */
  2797. if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
  2798. wake_up(&rdev->blocked_wait);
  2799. return rv;
  2800. }
  2801. static struct rdev_sysfs_entry rdev_bad_blocks =
  2802. __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
  2803. static ssize_t ubb_show(struct md_rdev *rdev, char *page)
  2804. {
  2805. return badblocks_show(&rdev->badblocks, page, 1);
  2806. }
  2807. static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
  2808. {
  2809. return badblocks_store(&rdev->badblocks, page, len, 1);
  2810. }
  2811. static struct rdev_sysfs_entry rdev_unack_bad_blocks =
  2812. __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
  2813. static struct attribute *rdev_default_attrs[] = {
  2814. &rdev_state.attr,
  2815. &rdev_errors.attr,
  2816. &rdev_slot.attr,
  2817. &rdev_offset.attr,
  2818. &rdev_new_offset.attr,
  2819. &rdev_size.attr,
  2820. &rdev_recovery_start.attr,
  2821. &rdev_bad_blocks.attr,
  2822. &rdev_unack_bad_blocks.attr,
  2823. NULL,
  2824. };
  2825. static ssize_t
  2826. rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
  2827. {
  2828. struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
  2829. struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
  2830. struct mddev *mddev = rdev->mddev;
  2831. ssize_t rv;
  2832. if (!entry->show)
  2833. return -EIO;
  2834. rv = mddev ? mddev_lock(mddev) : -EBUSY;
  2835. if (!rv) {
  2836. if (rdev->mddev == NULL)
  2837. rv = -EBUSY;
  2838. else
  2839. rv = entry->show(rdev, page);
  2840. mddev_unlock(mddev);
  2841. }
  2842. return rv;
  2843. }
  2844. static ssize_t
  2845. rdev_attr_store(struct kobject *kobj, struct attribute *attr,
  2846. const char *page, size_t length)
  2847. {
  2848. struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
  2849. struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
  2850. ssize_t rv;
  2851. struct mddev *mddev = rdev->mddev;
  2852. if (!entry->store)
  2853. return -EIO;
  2854. if (!capable(CAP_SYS_ADMIN))
  2855. return -EACCES;
  2856. rv = mddev ? mddev_lock(mddev): -EBUSY;
  2857. if (!rv) {
  2858. if (rdev->mddev == NULL)
  2859. rv = -EBUSY;
  2860. else
  2861. rv = entry->store(rdev, page, length);
  2862. mddev_unlock(mddev);
  2863. }
  2864. return rv;
  2865. }
  2866. static void rdev_free(struct kobject *ko)
  2867. {
  2868. struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
  2869. kfree(rdev);
  2870. }
  2871. static const struct sysfs_ops rdev_sysfs_ops = {
  2872. .show = rdev_attr_show,
  2873. .store = rdev_attr_store,
  2874. };
  2875. static struct kobj_type rdev_ktype = {
  2876. .release = rdev_free,
  2877. .sysfs_ops = &rdev_sysfs_ops,
  2878. .default_attrs = rdev_default_attrs,
  2879. };
  2880. int md_rdev_init(struct md_rdev *rdev)
  2881. {
  2882. rdev->desc_nr = -1;
  2883. rdev->saved_raid_disk = -1;
  2884. rdev->raid_disk = -1;
  2885. rdev->flags = 0;
  2886. rdev->data_offset = 0;
  2887. rdev->new_data_offset = 0;
  2888. rdev->sb_events = 0;
  2889. rdev->last_read_error.tv_sec = 0;
  2890. rdev->last_read_error.tv_nsec = 0;
  2891. rdev->sb_loaded = 0;
  2892. rdev->bb_page = NULL;
  2893. atomic_set(&rdev->nr_pending, 0);
  2894. atomic_set(&rdev->read_errors, 0);
  2895. atomic_set(&rdev->corrected_errors, 0);
  2896. INIT_LIST_HEAD(&rdev->same_set);
  2897. init_waitqueue_head(&rdev->blocked_wait);
  2898. /* Add space to store bad block list.
  2899. * This reserves the space even on arrays where it cannot
  2900. * be used - I wonder if that matters
  2901. */
  2902. rdev->badblocks.count = 0;
  2903. rdev->badblocks.shift = 0;
  2904. rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
  2905. seqlock_init(&rdev->badblocks.lock);
  2906. if (rdev->badblocks.page == NULL)
  2907. return -ENOMEM;
  2908. return 0;
  2909. }
  2910. EXPORT_SYMBOL_GPL(md_rdev_init);
  2911. /*
  2912. * Import a device. If 'super_format' >= 0, then sanity check the superblock
  2913. *
  2914. * mark the device faulty if:
  2915. *
  2916. * - the device is nonexistent (zero size)
  2917. * - the device has no valid superblock
  2918. *
  2919. * a faulty rdev _never_ has rdev->sb set.
  2920. */
  2921. static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
  2922. {
  2923. char b[BDEVNAME_SIZE];
  2924. int err;
  2925. struct md_rdev *rdev;
  2926. sector_t size;
  2927. rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
  2928. if (!rdev) {
  2929. printk(KERN_ERR "md: could not alloc mem for new device!\n");
  2930. return ERR_PTR(-ENOMEM);
  2931. }
  2932. err = md_rdev_init(rdev);
  2933. if (err)
  2934. goto abort_free;
  2935. err = alloc_disk_sb(rdev);
  2936. if (err)
  2937. goto abort_free;
  2938. err = lock_rdev(rdev, newdev, super_format == -2);
  2939. if (err)
  2940. goto abort_free;
  2941. kobject_init(&rdev->kobj, &rdev_ktype);
  2942. size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
  2943. if (!size) {
  2944. printk(KERN_WARNING
  2945. "md: %s has zero or unknown size, marking faulty!\n",
  2946. bdevname(rdev->bdev,b));
  2947. err = -EINVAL;
  2948. goto abort_free;
  2949. }
  2950. if (super_format >= 0) {
  2951. err = super_types[super_format].
  2952. load_super(rdev, NULL, super_minor);
  2953. if (err == -EINVAL) {
  2954. printk(KERN_WARNING
  2955. "md: %s does not have a valid v%d.%d "
  2956. "superblock, not importing!\n",
  2957. bdevname(rdev->bdev,b),
  2958. super_format, super_minor);
  2959. goto abort_free;
  2960. }
  2961. if (err < 0) {
  2962. printk(KERN_WARNING
  2963. "md: could not read %s's sb, not importing!\n",
  2964. bdevname(rdev->bdev,b));
  2965. goto abort_free;
  2966. }
  2967. }
  2968. if (super_format == -1)
  2969. /* hot-add for 0.90, or non-persistent: so no badblocks */
  2970. rdev->badblocks.shift = -1;
  2971. return rdev;
  2972. abort_free:
  2973. if (rdev->bdev)
  2974. unlock_rdev(rdev);
  2975. md_rdev_clear(rdev);
  2976. kfree(rdev);
  2977. return ERR_PTR(err);
  2978. }
  2979. /*
  2980. * Check a full RAID array for plausibility
  2981. */
  2982. static void analyze_sbs(struct mddev * mddev)
  2983. {
  2984. int i;
  2985. struct md_rdev *rdev, *freshest, *tmp;
  2986. char b[BDEVNAME_SIZE];
  2987. freshest = NULL;
  2988. rdev_for_each_safe(rdev, tmp, mddev)
  2989. switch (super_types[mddev->major_version].
  2990. load_super(rdev, freshest, mddev->minor_version)) {
  2991. case 1:
  2992. freshest = rdev;
  2993. break;
  2994. case 0:
  2995. break;
  2996. default:
  2997. printk( KERN_ERR \
  2998. "md: fatal superblock inconsistency in %s"
  2999. " -- removing from array\n",
  3000. bdevname(rdev->bdev,b));
  3001. kick_rdev_from_array(rdev);
  3002. }
  3003. super_types[mddev->major_version].
  3004. validate_super(mddev, freshest);
  3005. i = 0;
  3006. rdev_for_each_safe(rdev, tmp, mddev) {
  3007. if (mddev->max_disks &&
  3008. (rdev->desc_nr >= mddev->max_disks ||
  3009. i > mddev->max_disks)) {
  3010. printk(KERN_WARNING
  3011. "md: %s: %s: only %d devices permitted\n",
  3012. mdname(mddev), bdevname(rdev->bdev, b),
  3013. mddev->max_disks);
  3014. kick_rdev_from_array(rdev);
  3015. continue;
  3016. }
  3017. if (rdev != freshest)
  3018. if (super_types[mddev->major_version].
  3019. validate_super(mddev, rdev)) {
  3020. printk(KERN_WARNING "md: kicking non-fresh %s"
  3021. " from array!\n",
  3022. bdevname(rdev->bdev,b));
  3023. kick_rdev_from_array(rdev);
  3024. continue;
  3025. }
  3026. if (mddev->level == LEVEL_MULTIPATH) {
  3027. rdev->desc_nr = i++;
  3028. rdev->raid_disk = rdev->desc_nr;
  3029. set_bit(In_sync, &rdev->flags);
  3030. } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
  3031. rdev->raid_disk = -1;
  3032. clear_bit(In_sync, &rdev->flags);
  3033. }
  3034. }
  3035. }
  3036. /* Read a fixed-point number.
  3037. * Numbers in sysfs attributes should be in "standard" units where
  3038. * possible, so time should be in seconds.
3039. * However we internally use a much smaller unit such as
  3040. * milliseconds or jiffies.
  3041. * This function takes a decimal number with a possible fractional
  3042. * component, and produces an integer which is the result of
  3043. * multiplying that number by 10^'scale'.
  3044. * all without any floating-point arithmetic.
  3045. */
  3046. int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
  3047. {
  3048. unsigned long result = 0;
  3049. long decimals = -1;
  3050. while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
  3051. if (*cp == '.')
  3052. decimals = 0;
  3053. else if (decimals < scale) {
  3054. unsigned int value;
  3055. value = *cp - '0';
  3056. result = result * 10 + value;
  3057. if (decimals >= 0)
  3058. decimals++;
  3059. }
  3060. cp++;
  3061. }
  3062. if (*cp == '\n')
  3063. cp++;
  3064. if (*cp)
  3065. return -EINVAL;
  3066. if (decimals < 0)
  3067. decimals = 0;
  3068. while (decimals < scale) {
  3069. result *= 10;
  3070. decimals ++;
  3071. }
  3072. *res = result;
  3073. return 0;
  3074. }
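/*
 * For example, strict_strtoul_scaled("1.25", &res, 3) stores 1250
 * (1.25s in milliseconds) and "5" with scale 3 stores 5000, while a
 * second '.' or trailing junk beyond one final newline gives -EINVAL.
 */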
  3075. static void md_safemode_timeout(unsigned long data);
  3076. static ssize_t
  3077. safe_delay_show(struct mddev *mddev, char *page)
  3078. {
  3079. int msec = (mddev->safemode_delay*1000)/HZ;
  3080. return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
  3081. }
  3082. static ssize_t
  3083. safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
  3084. {
  3085. unsigned long msec;
  3086. if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
  3087. return -EINVAL;
  3088. if (msec == 0)
  3089. mddev->safemode_delay = 0;
  3090. else {
  3091. unsigned long old_delay = mddev->safemode_delay;
  3092. mddev->safemode_delay = (msec*HZ)/1000;
  3093. if (mddev->safemode_delay == 0)
  3094. mddev->safemode_delay = 1;
  3095. if (mddev->safemode_delay < old_delay)
  3096. md_safemode_timeout((unsigned long)mddev);
  3097. }
  3098. return len;
  3099. }
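/*
 * E.g. writing "0.050" requests a 50ms delay, stored as (50*HZ)/1000
 * jiffies and rounded up to at least 1 jiffy so a non-zero request
 * never becomes "immediate"; shortening the delay runs the safemode
 * timeout at once so the shorter window takes effect immediately.
 */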
  3100. static struct md_sysfs_entry md_safe_delay =
  3101. __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
  3102. static ssize_t
  3103. level_show(struct mddev *mddev, char *page)
  3104. {
  3105. struct md_personality *p = mddev->pers;
  3106. if (p)
  3107. return sprintf(page, "%s\n", p->name);
  3108. else if (mddev->clevel[0])
  3109. return sprintf(page, "%s\n", mddev->clevel);
  3110. else if (mddev->level != LEVEL_NONE)
  3111. return sprintf(page, "%d\n", mddev->level);
  3112. else
  3113. return 0;
  3114. }
  3115. static ssize_t
  3116. level_store(struct mddev *mddev, const char *buf, size_t len)
  3117. {
  3118. char clevel[16];
  3119. ssize_t rv = len;
  3120. struct md_personality *pers;
  3121. long level;
  3122. void *priv;
  3123. struct md_rdev *rdev;
  3124. if (mddev->pers == NULL) {
  3125. if (len == 0)
  3126. return 0;
  3127. if (len >= sizeof(mddev->clevel))
  3128. return -ENOSPC;
  3129. strncpy(mddev->clevel, buf, len);
  3130. if (mddev->clevel[len-1] == '\n')
  3131. len--;
  3132. mddev->clevel[len] = 0;
  3133. mddev->level = LEVEL_NONE;
  3134. return rv;
  3135. }
  3136. /* request to change the personality. Need to ensure:
  3137. * - array is not engaged in resync/recovery/reshape
  3138. * - old personality can be suspended
3139. * - new personality can take over the array.
  3140. */
  3141. if (mddev->sync_thread ||
  3142. mddev->reshape_position != MaxSector ||
  3143. mddev->sysfs_active)
  3144. return -EBUSY;
  3145. if (!mddev->pers->quiesce) {
  3146. printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
  3147. mdname(mddev), mddev->pers->name);
  3148. return -EINVAL;
  3149. }
  3150. /* Now find the new personality */
  3151. if (len == 0 || len >= sizeof(clevel))
  3152. return -EINVAL;
  3153. strncpy(clevel, buf, len);
  3154. if (clevel[len-1] == '\n')
  3155. len--;
  3156. clevel[len] = 0;
  3157. if (strict_strtol(clevel, 10, &level))
  3158. level = LEVEL_NONE;
  3159. if (request_module("md-%s", clevel) != 0)
  3160. request_module("md-level-%s", clevel);
  3161. spin_lock(&pers_lock);
  3162. pers = find_pers(level, clevel);
  3163. if (!pers || !try_module_get(pers->owner)) {
  3164. spin_unlock(&pers_lock);
  3165. printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
  3166. return -EINVAL;
  3167. }
  3168. spin_unlock(&pers_lock);
  3169. if (pers == mddev->pers) {
  3170. /* Nothing to do! */
  3171. module_put(pers->owner);
  3172. return rv;
  3173. }
  3174. if (!pers->takeover) {
  3175. module_put(pers->owner);
  3176. printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
  3177. mdname(mddev), clevel);
  3178. return -EINVAL;
  3179. }
  3180. rdev_for_each(rdev, mddev)
  3181. rdev->new_raid_disk = rdev->raid_disk;
  3182. /* ->takeover must set new_* and/or delta_disks
  3183. * if it succeeds, and may set them when it fails.
  3184. */
  3185. priv = pers->takeover(mddev);
  3186. if (IS_ERR(priv)) {
  3187. mddev->new_level = mddev->level;
  3188. mddev->new_layout = mddev->layout;
  3189. mddev->new_chunk_sectors = mddev->chunk_sectors;
  3190. mddev->raid_disks -= mddev->delta_disks;
  3191. mddev->delta_disks = 0;
  3192. mddev->reshape_backwards = 0;
  3193. module_put(pers->owner);
  3194. printk(KERN_WARNING "md: %s: %s would not accept array\n",
  3195. mdname(mddev), clevel);
  3196. return PTR_ERR(priv);
  3197. }
  3198. /* Looks like we have a winner */
  3199. mddev_suspend(mddev);
  3200. mddev->pers->stop(mddev);
  3201. if (mddev->pers->sync_request == NULL &&
  3202. pers->sync_request != NULL) {
  3203. /* need to add the md_redundancy_group */
  3204. if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
  3205. printk(KERN_WARNING
  3206. "md: cannot register extra attributes for %s\n",
  3207. mdname(mddev));
  3208. mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
  3209. }
  3210. if (mddev->pers->sync_request != NULL &&
  3211. pers->sync_request == NULL) {
  3212. /* need to remove the md_redundancy_group */
  3213. if (mddev->to_remove == NULL)
  3214. mddev->to_remove = &md_redundancy_group;
  3215. }
  3216. if (mddev->pers->sync_request == NULL &&
  3217. mddev->external) {
  3218. /* We are converting from a no-redundancy array
  3219. * to a redundancy array and metadata is managed
  3220. * externally so we need to be sure that writes
  3221. * won't block due to a need to transition
  3222. * clean->dirty
  3223. * until external management is started.
  3224. */
  3225. mddev->in_sync = 0;
  3226. mddev->safemode_delay = 0;
  3227. mddev->safemode = 0;
  3228. }
  3229. rdev_for_each(rdev, mddev) {
  3230. if (rdev->raid_disk < 0)
  3231. continue;
  3232. if (rdev->new_raid_disk >= mddev->raid_disks)
  3233. rdev->new_raid_disk = -1;
  3234. if (rdev->new_raid_disk == rdev->raid_disk)
  3235. continue;
  3236. sysfs_unlink_rdev(mddev, rdev);
  3237. }
  3238. rdev_for_each(rdev, mddev) {
  3239. if (rdev->raid_disk < 0)
  3240. continue;
  3241. if (rdev->new_raid_disk == rdev->raid_disk)
  3242. continue;
  3243. rdev->raid_disk = rdev->new_raid_disk;
  3244. if (rdev->raid_disk < 0)
  3245. clear_bit(In_sync, &rdev->flags);
  3246. else {
  3247. if (sysfs_link_rdev(mddev, rdev))
  3248. printk(KERN_WARNING "md: cannot register rd%d"
  3249. " for %s after level change\n",
  3250. rdev->raid_disk, mdname(mddev));
  3251. }
  3252. }
  3253. module_put(mddev->pers->owner);
  3254. mddev->pers = pers;
  3255. mddev->private = priv;
  3256. strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
  3257. mddev->level = mddev->new_level;
  3258. mddev->layout = mddev->new_layout;
  3259. mddev->chunk_sectors = mddev->new_chunk_sectors;
  3260. mddev->delta_disks = 0;
  3261. mddev->reshape_backwards = 0;
  3262. mddev->degraded = 0;
  3263. if (mddev->pers->sync_request == NULL) {
  3264. /* this is now an array without redundancy, so
  3265. * it must always be in_sync
  3266. */
  3267. mddev->in_sync = 1;
  3268. del_timer_sync(&mddev->safemode_timer);
  3269. }
  3270. pers->run(mddev);
  3271. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  3272. mddev_resume(mddev);
  3273. sysfs_notify(&mddev->kobj, NULL, "level");
  3274. md_new_event(mddev);
  3275. return rv;
  3276. }
  3277. static struct md_sysfs_entry md_level =
  3278. __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
  3279. static ssize_t
  3280. layout_show(struct mddev *mddev, char *page)
  3281. {
  3282. /* just a number, not meaningful for all levels */
  3283. if (mddev->reshape_position != MaxSector &&
  3284. mddev->layout != mddev->new_layout)
  3285. return sprintf(page, "%d (%d)\n",
  3286. mddev->new_layout, mddev->layout);
  3287. return sprintf(page, "%d\n", mddev->layout);
  3288. }
  3289. static ssize_t
  3290. layout_store(struct mddev *mddev, const char *buf, size_t len)
  3291. {
  3292. char *e;
  3293. unsigned long n = simple_strtoul(buf, &e, 10);
  3294. if (!*buf || (*e && *e != '\n'))
  3295. return -EINVAL;
  3296. if (mddev->pers) {
  3297. int err;
  3298. if (mddev->pers->check_reshape == NULL)
  3299. return -EBUSY;
  3300. mddev->new_layout = n;
  3301. err = mddev->pers->check_reshape(mddev);
  3302. if (err) {
  3303. mddev->new_layout = mddev->layout;
  3304. return err;
  3305. }
  3306. } else {
  3307. mddev->new_layout = n;
  3308. if (mddev->reshape_position == MaxSector)
  3309. mddev->layout = n;
  3310. }
  3311. return len;
  3312. }
  3313. static struct md_sysfs_entry md_layout =
  3314. __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
  3315. static ssize_t
  3316. raid_disks_show(struct mddev *mddev, char *page)
  3317. {
  3318. if (mddev->raid_disks == 0)
  3319. return 0;
  3320. if (mddev->reshape_position != MaxSector &&
  3321. mddev->delta_disks != 0)
  3322. return sprintf(page, "%d (%d)\n", mddev->raid_disks,
  3323. mddev->raid_disks - mddev->delta_disks);
  3324. return sprintf(page, "%d\n", mddev->raid_disks);
  3325. }
  3326. static int update_raid_disks(struct mddev *mddev, int raid_disks);
  3327. static ssize_t
  3328. raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
  3329. {
  3330. char *e;
  3331. int rv = 0;
  3332. unsigned long n = simple_strtoul(buf, &e, 10);
  3333. if (!*buf || (*e && *e != '\n'))
  3334. return -EINVAL;
  3335. if (mddev->pers)
  3336. rv = update_raid_disks(mddev, n);
  3337. else if (mddev->reshape_position != MaxSector) {
  3338. struct md_rdev *rdev;
  3339. int olddisks = mddev->raid_disks - mddev->delta_disks;
  3340. rdev_for_each(rdev, mddev) {
  3341. if (olddisks < n &&
  3342. rdev->data_offset < rdev->new_data_offset)
  3343. return -EINVAL;
  3344. if (olddisks > n &&
  3345. rdev->data_offset > rdev->new_data_offset)
  3346. return -EINVAL;
  3347. }
  3348. mddev->delta_disks = n - olddisks;
  3349. mddev->raid_disks = n;
  3350. mddev->reshape_backwards = (mddev->delta_disks < 0);
  3351. } else
  3352. mddev->raid_disks = n;
  3353. return rv ? rv : len;
  3354. }
  3355. static struct md_sysfs_entry md_raid_disks =
  3356. __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
  3357. static ssize_t
  3358. chunk_size_show(struct mddev *mddev, char *page)
  3359. {
  3360. if (mddev->reshape_position != MaxSector &&
  3361. mddev->chunk_sectors != mddev->new_chunk_sectors)
  3362. return sprintf(page, "%d (%d)\n",
  3363. mddev->new_chunk_sectors << 9,
  3364. mddev->chunk_sectors << 9);
  3365. return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
  3366. }
  3367. static ssize_t
  3368. chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
  3369. {
  3370. char *e;
  3371. unsigned long n = simple_strtoul(buf, &e, 10);
  3372. if (!*buf || (*e && *e != '\n'))
  3373. return -EINVAL;
  3374. if (mddev->pers) {
  3375. int err;
  3376. if (mddev->pers->check_reshape == NULL)
  3377. return -EBUSY;
  3378. mddev->new_chunk_sectors = n >> 9;
  3379. err = mddev->pers->check_reshape(mddev);
  3380. if (err) {
  3381. mddev->new_chunk_sectors = mddev->chunk_sectors;
  3382. return err;
  3383. }
  3384. } else {
  3385. mddev->new_chunk_sectors = n >> 9;
  3386. if (mddev->reshape_position == MaxSector)
  3387. mddev->chunk_sectors = n >> 9;
  3388. }
  3389. return len;
  3390. }
  3391. static struct md_sysfs_entry md_chunk_size =
  3392. __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
  3393. static ssize_t
  3394. resync_start_show(struct mddev *mddev, char *page)
  3395. {
  3396. if (mddev->recovery_cp == MaxSector)
  3397. return sprintf(page, "none\n");
  3398. return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
  3399. }
  3400. static ssize_t
  3401. resync_start_store(struct mddev *mddev, const char *buf, size_t len)
  3402. {
  3403. char *e;
  3404. unsigned long long n = simple_strtoull(buf, &e, 10);
  3405. if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
  3406. return -EBUSY;
  3407. if (cmd_match(buf, "none"))
  3408. n = MaxSector;
  3409. else if (!*buf || (*e && *e != '\n'))
  3410. return -EINVAL;
  3411. mddev->recovery_cp = n;
  3412. return len;
  3413. }
  3414. static struct md_sysfs_entry md_resync_start =
  3415. __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
  3416. /*
  3417. * The array state can be:
  3418. *
  3419. * clear
  3420. * No devices, no size, no level
  3421. * Equivalent to STOP_ARRAY ioctl
  3422. * inactive
  3423. * May have some settings, but array is not active
  3424. * all IO results in error
  3425. * When written, doesn't tear down array, but just stops it
  3426. * suspended (not supported yet)
  3427. * All IO requests will block. The array can be reconfigured.
  3428. * Writing this, if accepted, will block until array is quiescent
  3429. * readonly
  3430. * no resync can happen. no superblocks get written.
  3431. * write requests fail
  3432. * read-auto
  3433. * like readonly, but behaves like 'clean' on a write request.
  3434. *
  3435. * clean - no pending writes, but otherwise active.
  3436. * When written to inactive array, starts without resync
  3437. * If a write request arrives then
  3438. * if metadata is known, mark 'dirty' and switch to 'active'.
  3439. * if not known, block and switch to write-pending
  3440. * If written to an active array that has pending writes, then fails.
  3441. * active
  3442. * fully active: IO and resync can be happening.
  3443. * When written to inactive array, starts with resync
  3444. *
  3445. * write-pending
  3446. * clean, but writes are blocked waiting for 'active' to be written.
  3447. *
  3448. * active-idle
  3449. * like active, but no writes have been seen for a while (100msec).
  3450. *
  3451. */
  3452. enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
  3453. write_pending, active_idle, bad_word};
  3454. static char *array_states[] = {
  3455. "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
  3456. "write-pending", "active-idle", NULL };
  3457. static int match_word(const char *word, char **list)
  3458. {
  3459. int n;
  3460. for (n=0; list[n]; n++)
  3461. if (cmd_match(word, list[n]))
  3462. break;
  3463. return n;
  3464. }
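/*
 * match_word() returns the index of the matching entry, or the index
 * of the terminating NULL (== bad_word) when nothing matches: e.g.
 * "read-auto\n" yields read_auto, "bogus" yields bad_word.
 */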
  3465. static ssize_t
  3466. array_state_show(struct mddev *mddev, char *page)
  3467. {
  3468. enum array_state st = inactive;
  3469. if (mddev->pers)
  3470. switch(mddev->ro) {
  3471. case 1:
  3472. st = readonly;
  3473. break;
  3474. case 2:
  3475. st = read_auto;
  3476. break;
  3477. case 0:
  3478. if (mddev->in_sync)
  3479. st = clean;
  3480. else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
  3481. st = write_pending;
  3482. else if (mddev->safemode)
  3483. st = active_idle;
  3484. else
  3485. st = active;
  3486. }
  3487. else {
  3488. if (list_empty(&mddev->disks) &&
  3489. mddev->raid_disks == 0 &&
  3490. mddev->dev_sectors == 0)
  3491. st = clear;
  3492. else
  3493. st = inactive;
  3494. }
  3495. return sprintf(page, "%s\n", array_states[st]);
  3496. }
  3497. static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
  3498. static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
  3499. static int do_md_run(struct mddev * mddev);
  3500. static int restart_array(struct mddev *mddev);
  3501. static ssize_t
  3502. array_state_store(struct mddev *mddev, const char *buf, size_t len)
  3503. {
  3504. int err = -EINVAL;
  3505. enum array_state st = match_word(buf, array_states);
  3506. switch(st) {
  3507. case bad_word:
  3508. break;
  3509. case clear:
  3510. /* stopping an active array */
  3511. if (atomic_read(&mddev->openers) > 0)
  3512. return -EBUSY;
  3513. err = do_md_stop(mddev, 0, NULL);
  3514. break;
  3515. case inactive:
  3516. /* stopping an active array */
  3517. if (mddev->pers) {
  3518. if (atomic_read(&mddev->openers) > 0)
  3519. return -EBUSY;
  3520. err = do_md_stop(mddev, 2, NULL);
  3521. } else
  3522. err = 0; /* already inactive */
  3523. break;
  3524. case suspended:
  3525. break; /* not supported yet */
  3526. case readonly:
  3527. if (mddev->pers)
  3528. err = md_set_readonly(mddev, NULL);
  3529. else {
  3530. mddev->ro = 1;
  3531. set_disk_ro(mddev->gendisk, 1);
  3532. err = do_md_run(mddev);
  3533. }
  3534. break;
  3535. case read_auto:
  3536. if (mddev->pers) {
  3537. if (mddev->ro == 0)
  3538. err = md_set_readonly(mddev, NULL);
  3539. else if (mddev->ro == 1)
  3540. err = restart_array(mddev);
  3541. if (err == 0) {
  3542. mddev->ro = 2;
  3543. set_disk_ro(mddev->gendisk, 0);
  3544. }
  3545. } else {
  3546. mddev->ro = 2;
  3547. err = do_md_run(mddev);
  3548. }
  3549. break;
  3550. case clean:
  3551. if (mddev->pers) {
  3552. restart_array(mddev);
  3553. spin_lock_irq(&mddev->write_lock);
  3554. if (atomic_read(&mddev->writes_pending) == 0) {
  3555. if (mddev->in_sync == 0) {
  3556. mddev->in_sync = 1;
  3557. if (mddev->safemode == 1)
  3558. mddev->safemode = 0;
  3559. set_bit(MD_CHANGE_CLEAN, &mddev->flags);
  3560. }
  3561. err = 0;
  3562. } else
  3563. err = -EBUSY;
  3564. spin_unlock_irq(&mddev->write_lock);
  3565. } else
  3566. err = -EINVAL;
  3567. break;
  3568. case active:
  3569. if (mddev->pers) {
  3570. restart_array(mddev);
  3571. clear_bit(MD_CHANGE_PENDING, &mddev->flags);
  3572. wake_up(&mddev->sb_wait);
  3573. err = 0;
  3574. } else {
  3575. mddev->ro = 0;
  3576. set_disk_ro(mddev->gendisk, 0);
  3577. err = do_md_run(mddev);
  3578. }
  3579. break;
  3580. case write_pending:
  3581. case active_idle:
  3582. /* these cannot be set */
  3583. break;
  3584. }
  3585. if (err)
  3586. return err;
  3587. else {
  3588. if (mddev->hold_active == UNTIL_IOCTL)
  3589. mddev->hold_active = 0;
  3590. sysfs_notify_dirent_safe(mddev->sysfs_state);
  3591. return len;
  3592. }
  3593. }
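/*
 * Typical administrative use (path illustrative):
 *	echo readonly > /sys/block/md0/md/array_state
 * calls md_set_readonly() on a running array; writing "clean"
 * succeeds only while no writes are pending, otherwise -EBUSY.
 */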
  3594. static struct md_sysfs_entry md_array_state =
  3595. __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
  3596. static ssize_t
  3597. max_corrected_read_errors_show(struct mddev *mddev, char *page) {
  3598. return sprintf(page, "%d\n",
  3599. atomic_read(&mddev->max_corr_read_errors));
  3600. }
  3601. static ssize_t
  3602. max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
  3603. {
  3604. char *e;
  3605. unsigned long n = simple_strtoul(buf, &e, 10);
  3606. if (*buf && (*e == 0 || *e == '\n')) {
  3607. atomic_set(&mddev->max_corr_read_errors, n);
  3608. return len;
  3609. }
  3610. return -EINVAL;
  3611. }
  3612. static struct md_sysfs_entry max_corr_read_errors =
  3613. __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
  3614. max_corrected_read_errors_store);
  3615. static ssize_t
  3616. null_show(struct mddev *mddev, char *page)
  3617. {
  3618. return -EINVAL;
  3619. }
  3620. static ssize_t
  3621. new_dev_store(struct mddev *mddev, const char *buf, size_t len)
  3622. {
3623. /* buf must be %d:%d, optionally \n-terminated, giving major and minor numbers */
  3624. /* The new device is added to the array.
  3625. * If the array has a persistent superblock, we read the
  3626. * superblock to initialise info and check validity.
  3627. * Otherwise, only checking done is that in bind_rdev_to_array,
  3628. * which mainly checks size.
  3629. */
  3630. char *e;
  3631. int major = simple_strtoul(buf, &e, 10);
  3632. int minor;
  3633. dev_t dev;
  3634. struct md_rdev *rdev;
  3635. int err;
  3636. if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
  3637. return -EINVAL;
  3638. minor = simple_strtoul(e+1, &e, 10);
  3639. if (*e && *e != '\n')
  3640. return -EINVAL;
  3641. dev = MKDEV(major, minor);
  3642. if (major != MAJOR(dev) ||
  3643. minor != MINOR(dev))
  3644. return -EOVERFLOW;
  3645. if (mddev->persistent) {
  3646. rdev = md_import_device(dev, mddev->major_version,
  3647. mddev->minor_version);
  3648. if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
  3649. struct md_rdev *rdev0
  3650. = list_entry(mddev->disks.next,
  3651. struct md_rdev, same_set);
  3652. err = super_types[mddev->major_version]
  3653. .load_super(rdev, rdev0, mddev->minor_version);
  3654. if (err < 0)
  3655. goto out;
  3656. }
  3657. } else if (mddev->external)
  3658. rdev = md_import_device(dev, -2, -1);
  3659. else
  3660. rdev = md_import_device(dev, -1, -1);
  3661. if (IS_ERR(rdev))
  3662. return PTR_ERR(rdev);
  3663. err = bind_rdev_to_array(rdev, mddev);
  3664. out:
  3665. if (err)
  3666. export_rdev(rdev);
  3667. return err ? err : len;
  3668. }
  3669. static struct md_sysfs_entry md_new_device =
  3670. __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
  3671. static ssize_t
  3672. bitmap_store(struct mddev *mddev, const char *buf, size_t len)
  3673. {
  3674. char *end;
  3675. unsigned long chunk, end_chunk;
  3676. if (!mddev->bitmap)
  3677. goto out;
  3678. /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
  3679. while (*buf) {
  3680. chunk = end_chunk = simple_strtoul(buf, &end, 0);
  3681. if (buf == end) break;
  3682. if (*end == '-') { /* range */
  3683. buf = end + 1;
  3684. end_chunk = simple_strtoul(buf, &end, 0);
  3685. if (buf == end) break;
  3686. }
  3687. if (*end && !isspace(*end)) break;
  3688. bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
  3689. buf = skip_spaces(end);
  3690. }
  3691. bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
  3692. out:
  3693. return len;
  3694. }
  3695. static struct md_sysfs_entry md_bitmap =
  3696. __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
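/*
 * The parser above takes space-separated chunk numbers and ranges,
 * e.g. (illustrative) echo "100 200-205" > bitmap_set_bits marks
 * chunk 100 and chunks 200 through 205 dirty, then flushes the
 * bitmap to disk.
 */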
  3697. static ssize_t
  3698. size_show(struct mddev *mddev, char *page)
  3699. {
  3700. return sprintf(page, "%llu\n",
  3701. (unsigned long long)mddev->dev_sectors / 2);
  3702. }
  3703. static int update_size(struct mddev *mddev, sector_t num_sectors);
  3704. static ssize_t
  3705. size_store(struct mddev *mddev, const char *buf, size_t len)
  3706. {
  3707. /* If array is inactive, we can reduce the component size, but
  3708. * not increase it (except from 0).
  3709. * If array is active, we can try an on-line resize
  3710. */
  3711. sector_t sectors;
  3712. int err = strict_blocks_to_sectors(buf, &sectors);
  3713. if (err < 0)
  3714. return err;
  3715. if (mddev->pers) {
  3716. err = update_size(mddev, sectors);
  3717. md_update_sb(mddev, 1);
  3718. } else {
  3719. if (mddev->dev_sectors == 0 ||
  3720. mddev->dev_sectors > sectors)
  3721. mddev->dev_sectors = sectors;
  3722. else
  3723. err = -ENOSPC;
  3724. }
  3725. return err ? err : len;
  3726. }
  3727. static struct md_sysfs_entry md_size =
  3728. __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
3729. /* Metadata version.
  3730. * This is one of
  3731. * 'none' for arrays with no metadata (good luck...)
  3732. * 'external' for arrays with externally managed metadata,
  3733. * or N.M for internally known formats
  3734. */
  3735. static ssize_t
  3736. metadata_show(struct mddev *mddev, char *page)
  3737. {
  3738. if (mddev->persistent)
  3739. return sprintf(page, "%d.%d\n",
  3740. mddev->major_version, mddev->minor_version);
  3741. else if (mddev->external)
  3742. return sprintf(page, "external:%s\n", mddev->metadata_type);
  3743. else
  3744. return sprintf(page, "none\n");
  3745. }
  3746. static ssize_t
  3747. metadata_store(struct mddev *mddev, const char *buf, size_t len)
  3748. {
  3749. int major, minor;
  3750. char *e;
  3751. /* Changing the details of 'external' metadata is
  3752. * always permitted. Otherwise there must be
  3753. * no devices attached to the array.
  3754. */
  3755. if (mddev->external && strncmp(buf, "external:", 9) == 0)
  3756. ;
  3757. else if (!list_empty(&mddev->disks))
  3758. return -EBUSY;
  3759. if (cmd_match(buf, "none")) {
  3760. mddev->persistent = 0;
  3761. mddev->external = 0;
  3762. mddev->major_version = 0;
  3763. mddev->minor_version = 90;
  3764. return len;
  3765. }
  3766. if (strncmp(buf, "external:", 9) == 0) {
  3767. size_t namelen = len-9;
  3768. if (namelen >= sizeof(mddev->metadata_type))
  3769. namelen = sizeof(mddev->metadata_type)-1;
  3770. strncpy(mddev->metadata_type, buf+9, namelen);
  3771. mddev->metadata_type[namelen] = 0;
  3772. if (namelen && mddev->metadata_type[namelen-1] == '\n')
  3773. mddev->metadata_type[--namelen] = 0;
  3774. mddev->persistent = 0;
  3775. mddev->external = 1;
  3776. mddev->major_version = 0;
  3777. mddev->minor_version = 90;
  3778. return len;
  3779. }
  3780. major = simple_strtoul(buf, &e, 10);
  3781. if (e==buf || *e != '.')
  3782. return -EINVAL;
  3783. buf = e+1;
  3784. minor = simple_strtoul(buf, &e, 10);
  3785. if (e==buf || (*e && *e != '\n') )
  3786. return -EINVAL;
  3787. if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
  3788. return -ENOENT;
  3789. mddev->major_version = major;
  3790. mddev->minor_version = minor;
  3791. mddev->persistent = 1;
  3792. mddev->external = 0;
  3793. return len;
  3794. }
  3795. static struct md_sysfs_entry md_metadata =
  3796. __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static ssize_t
action_show(struct mddev *mddev, char *page)
{
	char *type = "idle";
	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		type = "frozen";
	else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				type = "check";
			else
				type = "repair";
		} else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			type = "recover";
	}
	return sprintf(page, "%s\n", type);
}

static void reap_sync_thread(struct mddev *mddev);

static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (cmd_match(page, "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			reap_sync_thread(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (cmd_match(page, "recover")) {
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	} else if (cmd_match(page, "reshape")) {
		int err;
		if (mddev->pers->start_reshape == NULL)
			return -EINVAL;
		err = mddev->pers->start_reshape(mddev);
		if (err)
			return err;
		sysfs_notify(&mddev->kobj, NULL, "degraded");
	} else {
		if (cmd_match(page, "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!cmd_match(page, "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	return len;
}

static ssize_t
mismatch_cnt_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long) mddev->resync_mismatches);
}

static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);

static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);

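/*
 * Example (device name hypothetical): a read-only scrub is requested with
 *   echo check > /sys/block/md0/md/sync_action
 * and can be interrupted by writing "idle".  Afterwards mismatch_cnt
 * reports the number of sectors the scrub recorded as inconsistent.
 */
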
static ssize_t
sync_min_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_min(mddev),
		       mddev->sync_speed_min ? "local" : "system");
}

static ssize_t
sync_min_store(struct mddev *mddev, const char *buf, size_t len)
{
	int min;
	char *e;

	if (strncmp(buf, "system", 6) == 0) {
		mddev->sync_speed_min = 0;
		return len;
	}
	min = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || min <= 0)
		return -EINVAL;
	mddev->sync_speed_min = min;
	return len;
}

static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);

static ssize_t
sync_max_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_max(mddev),
		       mddev->sync_speed_max ? "local" : "system");
}

static ssize_t
sync_max_store(struct mddev *mddev, const char *buf, size_t len)
{
	int max;
	char *e;

	if (strncmp(buf, "system", 6) == 0) {
		mddev->sync_speed_max = 0;
		return len;
	}
	max = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || max <= 0)
		return -EINVAL;
	mddev->sync_speed_max = max;
	return len;
}

static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);

static ssize_t
degraded_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);

static ssize_t
sync_force_parallel_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->parallel_resync);
}

static ssize_t
sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
{
	long n;

	if (strict_strtol(buf, 10, &n))
		return -EINVAL;

	if (n != 0 && n != 1)
		return -EINVAL;

	mddev->parallel_resync = n;

	if (mddev->sync_thread)
		wake_up(&resync_wait);

	return len;
}

/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
       sync_force_parallel_show, sync_force_parallel_store);

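/*
 * The routine below reports the recent resync rate: 'db' is the number
 * of sectors processed since the last rate mark, 'dt' the elapsed
 * seconds, and the final /2 converts 512-byte sectors to KiB, so the
 * value read from sync_speed is in KiB/sec.
 */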
static ssize_t
sync_speed_show(struct mddev *mddev, char *page)
{
	unsigned long resync, dt, db;

	if (mddev->curr_resync == 0)
		return sprintf(page, "none\n");
	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
	dt = (jiffies - mddev->resync_mark) / HZ;
	if (!dt) dt++;
	db = resync - mddev->resync_mark_cnt;
	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}

static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);

static ssize_t
sync_completed_show(struct mddev *mddev, char *page)
{
	unsigned long long max_sectors, resync;

	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return sprintf(page, "none\n");

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	resync = mddev->curr_resync_completed;
	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}

static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);

static ssize_t
min_sync_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long long min;
	if (strict_strtoull(buf, 10, &min))
		return -EINVAL;
	if (min > mddev->resync_max)
		return -EINVAL;
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	/* Must be a multiple of chunk_size */
	if (mddev->chunk_sectors) {
		sector_t temp = min;
		if (sector_div(temp, mddev->chunk_sectors))
			return -EINVAL;
	}
	mddev->resync_min = min;

	return len;
}

static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);

static ssize_t
max_sync_show(struct mddev *mddev, char *page)
{
	if (mddev->resync_max == MaxSector)
		return sprintf(page, "max\n");
	else
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (strncmp(buf, "max", 3) == 0)
		mddev->resync_max = MaxSector;
	else {
		unsigned long long max;
		if (strict_strtoull(buf, 10, &max))
			return -EINVAL;
		if (max < mddev->resync_min)
			return -EINVAL;
		if (max < mddev->resync_max &&
		    mddev->ro == 0 &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			return -EBUSY;

		/* Must be a multiple of chunk_size */
		if (mddev->chunk_sectors) {
			sector_t temp = max;
			if (sector_div(temp, mddev->chunk_sectors))
				return -EINVAL;
		}
		mddev->resync_max = max;
	}
	wake_up(&mddev->recovery_wait);
	return len;
}

static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);

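/*
 * suspend_lo/suspend_hi delimit a sector range in which I/O is
 * suspended.  Per the two store routines below, shrinking the range
 * only nudges the personality (quiesce(mddev, 2)) so blocked requests
 * can re-check, while growing it does a full quiesce(1)/quiesce(0)
 * cycle to drain in-flight I/O first.
 */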
static ssize_t
suspend_lo_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}

static ssize_t
suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	unsigned long long old = mddev->suspend_lo;

	if (mddev->pers == NULL ||
	    mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;

	mddev->suspend_lo = new;
	if (new >= old)
		/* Shrinking suspended region */
		mddev->pers->quiesce(mddev, 2);
	else {
		/* Expanding suspended region - need to wait */
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
	}
	return len;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);

static ssize_t
suspend_hi_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}

static ssize_t
suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	unsigned long long old = mddev->suspend_hi;

	if (mddev->pers == NULL ||
	    mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;

	mddev->suspend_hi = new;
	if (new <= old)
		/* Shrinking suspended region */
		mddev->pers->quiesce(mddev, 2);
	else {
		/* Expanding suspended region - need to wait */
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
	}
	return len;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);

static ssize_t
reshape_position_show(struct mddev *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->reshape_position);
	strcpy(page, "none\n");
	return 5;
}

static ssize_t
reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
{
	struct md_rdev *rdev;
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);

	if (mddev->pers)
		return -EBUSY;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	mddev->reshape_position = new;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	rdev_for_each(rdev, mddev)
		rdev->new_data_offset = rdev->data_offset;
	return len;
}

static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
       reshape_position_store);

static ssize_t
reshape_direction_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%s\n",
		       mddev->reshape_backwards ? "backwards" : "forwards");
}

static ssize_t
reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
{
	int backwards = 0;
	if (cmd_match(buf, "forwards"))
		backwards = 0;
	else if (cmd_match(buf, "backwards"))
		backwards = 1;
	else
		return -EINVAL;
	if (mddev->reshape_backwards == backwards)
		return len;

	/* check if we are allowed to change */
	if (mddev->delta_disks)
		return -EBUSY;

	if (mddev->persistent &&
	    mddev->major_version == 0)
		return -EINVAL;

	mddev->reshape_backwards = backwards;
	return len;
}

static struct md_sysfs_entry md_reshape_direction =
__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
       reshape_direction_store);

static ssize_t
array_size_show(struct mddev *mddev, char *page)
{
	if (mddev->external_size)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->array_sectors/2);
	else
		return sprintf(page, "default\n");
}

static ssize_t
array_size_store(struct mddev *mddev, const char *buf, size_t len)
{
	sector_t sectors;

	if (strncmp(buf, "default", 7) == 0) {
		if (mddev->pers)
			sectors = mddev->pers->size(mddev, 0, 0);
		else
			sectors = mddev->array_sectors;

		mddev->external_size = 0;
	} else {
		if (strict_blocks_to_sectors(buf, &sectors) < 0)
			return -EINVAL;
		if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
			return -E2BIG;

		mddev->external_size = 1;
	}

	mddev->array_sectors = sectors;
	if (mddev->pers) {
		set_capacity(mddev->gendisk, mddev->array_sectors);
		revalidate_disk(mddev->gendisk);
	}
	return len;
}

static struct md_sysfs_entry md_array_size =
__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
       array_size_store);

static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_layout.attr,
	&md_raid_disks.attr,
	&md_chunk_size.attr,
	&md_size.attr,
	&md_resync_start.attr,
	&md_metadata.attr,
	&md_new_device.attr,
	&md_safe_delay.attr,
	&md_array_state.attr,
	&md_reshape_position.attr,
	&md_reshape_direction.attr,
	&md_array_size.attr,
	&max_corr_read_errors.attr,
	NULL,
};

static struct attribute *md_redundancy_attrs[] = {
	&md_scan_mode.attr,
	&md_mismatches.attr,
	&md_sync_min.attr,
	&md_sync_max.attr,
	&md_sync_speed.attr,
	&md_sync_force_parallel.attr,
	&md_sync_completed.attr,
	&md_min_sync.attr,
	&md_max_sync.attr,
	&md_suspend_lo.attr,
	&md_suspend_hi.attr,
	&md_bitmap.attr,
	&md_degraded.attr,
	NULL,
};
static struct attribute_group md_redundancy_group = {
	.name = NULL,
	.attrs = md_redundancy_attrs,
};

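/*
 * Every attribute access funnels through the two wrappers below: they
 * confirm the mddev is still on all_mddevs, pin it with mddev_get(),
 * take the mddev lock around the per-attribute handler, then drop the
 * reference.  Stores additionally require CAP_SYS_ADMIN.
 */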
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	ssize_t rv;

	if (!entry->show)
		return -EIO;
	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);

	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->show(mddev, page);
		mddev_unlock(mddev);
	}
	mddev_put(mddev);
	return rv;
}

static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	ssize_t rv;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);
	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->store(mddev, page, length);
		mddev_unlock(mddev);
	}
	mddev_put(mddev);
	return rv;
}

static void md_free(struct kobject *ko)
{
	struct mddev *mddev = container_of(ko, struct mddev, kobj);

	if (mddev->sysfs_state)
		sysfs_put(mddev->sysfs_state);

	if (mddev->gendisk) {
		del_gendisk(mddev->gendisk);
		put_disk(mddev->gendisk);
	}
	if (mddev->queue)
		blk_cleanup_queue(mddev->queue);

	kfree(mddev);
}

static const struct sysfs_ops md_sysfs_ops = {
	.show	= md_attr_show,
	.store	= md_attr_store,
};
static struct kobj_type md_ktype = {
	.release	= md_free,
	.sysfs_ops	= &md_sysfs_ops,
	.default_attrs	= md_default_attrs,
};

int mdp_major = 0;

static void mddev_delayed_delete(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, del_work);

	sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
	kobject_del(&mddev->kobj);
	kobject_put(&mddev->kobj);
}

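/*
 * The function below creates the request queue, gendisk and "md"
 * kobject for an array.  A non-NULL 'name' requests a named (md_*)
 * array and must be unique; otherwise the classic md%d / md_d%d naming
 * derived from 'dev' is used.
 */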
static int md_alloc(dev_t dev, char *name)
{
	static DEFINE_MUTEX(disks_mutex);
	struct mddev *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned;
	int shift;
	int unit;
	int error;

	if (!mddev)
		return -ENODEV;

	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
	shift = partitioned ? MdpMinorShift : 0;
	unit = MINOR(mddev->unit) >> shift;

	/* wait for any previous instance of this device to be
	 * completely removed (mddev_delayed_delete).
	 */
	flush_workqueue(md_misc_wq);

	mutex_lock(&disks_mutex);
	error = -EEXIST;
	if (mddev->gendisk)
		goto abort;

	if (name) {
		/* Need to ensure that 'name' is not a duplicate.
		 */
		struct mddev *mddev2;
		spin_lock(&all_mddevs_lock);

		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
			if (mddev2->gendisk &&
			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
				spin_unlock(&all_mddevs_lock);
				goto abort;
			}
		spin_unlock(&all_mddevs_lock);
	}

	error = -ENOMEM;
	mddev->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mddev->queue)
		goto abort;
	mddev->queue->queuedata = mddev;

	blk_queue_make_request(mddev->queue, md_make_request);
	blk_set_stacking_limits(&mddev->queue->limits);

	disk = alloc_disk(1 << shift);
	if (!disk) {
		blk_cleanup_queue(mddev->queue);
		mddev->queue = NULL;
		goto abort;
	}
	disk->major = MAJOR(mddev->unit);
	disk->first_minor = unit << shift;
	if (name)
		strcpy(disk->disk_name, name);
	else if (partitioned)
		sprintf(disk->disk_name, "md_d%d", unit);
	else
		sprintf(disk->disk_name, "md%d", unit);
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
	/* Allow extended partitions.  This makes the
	 * 'mdp' device redundant, but we can't really
	 * remove it now.
	 */
	disk->flags |= GENHD_FL_EXT_DEVT;
	mddev->gendisk = disk;
	/* As soon as we call add_disk(), another thread could get
	 * through to md_open, so make sure it doesn't get too far
	 */
	mutex_lock(&mddev->open_mutex);
	add_disk(disk);

	error = kobject_init_and_add(&mddev->kobj, &md_ktype,
				     &disk_to_dev(disk)->kobj, "%s", "md");
	if (error) {
		/* This isn't possible, but as kobject_init_and_add is marked
		 * __must_check, we must do something with the result
		 */
		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
		       disk->disk_name);
		error = 0;
	}
	if (mddev->kobj.sd &&
	    sysfs_create_group(&mddev->kobj, &md_bitmap_group))
		printk(KERN_DEBUG "pointless warning\n");
	mutex_unlock(&mddev->open_mutex);
 abort:
	mutex_unlock(&disks_mutex);
	if (!error && mddev->kobj.sd) {
		kobject_uevent(&mddev->kobj, KOBJ_ADD);
		mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
	}
	mddev_put(mddev);
	return error;
}

static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
	md_alloc(dev, NULL);
	return NULL;
}

static int add_named_array(const char *val, struct kernel_param *kp)
{
	/* val must be "md_*" where * is not all digits.
	 * We allocate an array with a large free minor number, and
	 * set the name to val.  val must not already be an active name.
	 */
	int len = strlen(val);
	char buf[DISK_NAME_LEN];

	while (len && val[len-1] == '\n')
		len--;
	if (len >= DISK_NAME_LEN)
		return -E2BIG;
	strlcpy(buf, val, len+1);
	if (strncmp(buf, "md_", 3) != 0)
		return -EINVAL;
	return md_alloc(0, buf);
}

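/*
 * Sketch of how add_named_array() above is intended to be driven,
 * assuming it is wired up as a writable module parameter (e.g.
 * "new_array", as done elsewhere in this file):
 *   echo md_home > /sys/module/md_mod/parameters/new_array
 * would allocate an array whose gendisk is literally named "md_home".
 */
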
static void md_safemode_timeout(unsigned long data)
{
	struct mddev *mddev = (struct mddev *) data;

	if (!atomic_read(&mddev->writes_pending)) {
		mddev->safemode = 1;
		if (mddev->external)
			sysfs_notify_dirent_safe(mddev->sysfs_state);
	}
	md_wakeup_thread(mddev->thread);
}

static int start_dirty_degraded;

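/*
 * md_run() takes an assembled array live: analyse the superblocks, load
 * and bind the personality module, sanity-check that data and metadata
 * regions do not overlap, create the bitmap if one is configured, and
 * register the redundancy attributes.  do_md_run() further below adds
 * the block-layer finishing touches for the ioctl path.
 */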
int md_run(struct mddev *mddev)
{
	int err;
	struct md_rdev *rdev;
	struct md_personality *pers;

	if (list_empty(&mddev->disks))
		/* cannot run an array with no devices.. */
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	/* Cannot run until previous stop completes properly */
	if (mddev->sysfs_active)
		return -EBUSY;

	/*
	 * Analyze all RAID superblock(s)
	 */
	if (!mddev->raid_disks) {
		if (!mddev->persistent)
			return -EINVAL;
		analyze_sbs(mddev);
	}

	if (mddev->level != LEVEL_NONE)
		request_module("md-level-%d", mddev->level);
	else if (mddev->clevel[0])
		request_module("md-%s", mddev->clevel);

	/*
	 * Drop all container device buffers, from now on
	 * the only valid external interface is through the md
	 * device.
	 */
	rdev_for_each(rdev, mddev) {
		if (test_bit(Faulty, &rdev->flags))
			continue;
		sync_blockdev(rdev->bdev);
		invalidate_bdev(rdev->bdev);

		/* perform some consistency tests on the device.
		 * We don't want the data to overlap the metadata,
		 * Internal Bitmap issues have been handled elsewhere.
		 */
		if (rdev->meta_bdev) {
			/* Nothing to check */;
		} else if (rdev->data_offset < rdev->sb_start) {
			if (mddev->dev_sectors &&
			    rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start) {
				printk("md: %s: data overlaps metadata\n",
				       mdname(mddev));
				return -EINVAL;
			}
		} else {
			if (rdev->sb_start + rdev->sb_size/512
			    > rdev->data_offset) {
				printk("md: %s: metadata overlaps data\n",
				       mdname(mddev));
				return -EINVAL;
			}
		}
		sysfs_notify_dirent_safe(rdev->sysfs_state);
	}

	if (mddev->bio_set == NULL)
		mddev->bio_set = bioset_create(BIO_POOL_SIZE,
					       sizeof(struct mddev *));

	spin_lock(&pers_lock);
	pers = find_pers(mddev->level, mddev->clevel);
	if (!pers || !try_module_get(pers->owner)) {
		spin_unlock(&pers_lock);
		if (mddev->level != LEVEL_NONE)
			printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
			       mddev->level);
		else
			printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
			       mddev->clevel);
		return -EINVAL;
	}
	mddev->pers = pers;
	spin_unlock(&pers_lock);
	if (mddev->level != pers->level) {
		mddev->level = pers->level;
		mddev->new_level = pers->level;
	}
	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));

	if (mddev->reshape_position != MaxSector &&
	    pers->start_reshape == NULL) {
		/* This personality cannot handle reshaping... */
		mddev->pers = NULL;
		module_put(pers->owner);
		return -EINVAL;
	}

	if (pers->sync_request) {
		/* Warn if this is a potentially silly
		 * configuration.
		 */
		char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
		struct md_rdev *rdev2;
		int warned = 0;

		rdev_for_each(rdev, mddev)
			rdev_for_each(rdev2, mddev) {
				if (rdev < rdev2 &&
				    rdev->bdev->bd_contains ==
				    rdev2->bdev->bd_contains) {
					printk(KERN_WARNING
					       "%s: WARNING: %s appears to be"
					       " on the same physical disk as"
					       " %s.\n",
					       mdname(mddev),
					       bdevname(rdev->bdev,b),
					       bdevname(rdev2->bdev,b2));
					warned = 1;
				}
			}

		if (warned)
			printk(KERN_WARNING
			       "True protection against single-disk"
			       " failure might be compromised.\n");
	}

	mddev->recovery = 0;
	/* may be over-ridden by personality */
	mddev->resync_max_sectors = mddev->dev_sectors;

	mddev->ok_start_degraded = start_dirty_degraded;

	if (start_readonly && mddev->ro == 0)
		mddev->ro = 2; /* read-only, but switch on first write */

	err = mddev->pers->run(mddev);
	if (err)
		printk(KERN_ERR "md: pers->run() failed ...\n");
	else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
		WARN_ONCE(!mddev->external_size, "%s: default size too small,"
			  " but 'external_size' not in effect?\n", __func__);
		printk(KERN_ERR
		       "md: invalid array_size %llu > default size %llu\n",
		       (unsigned long long)mddev->array_sectors / 2,
		       (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
		err = -EINVAL;
		mddev->pers->stop(mddev);
	}
	if (err == 0 && mddev->pers->sync_request &&
	    (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
		err = bitmap_create(mddev);
		if (err) {
			printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
			       mdname(mddev), err);
			mddev->pers->stop(mddev);
		}
	}
	if (err) {
		module_put(mddev->pers->owner);
		mddev->pers = NULL;
		bitmap_destroy(mddev);
		return err;
	}
	if (mddev->pers->sync_request) {
		if (mddev->kobj.sd &&
		    sysfs_create_group(&mddev->kobj, &md_redundancy_group))
			printk(KERN_WARNING
			       "md: cannot register extra attributes for %s\n",
			       mdname(mddev));
		mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
		mddev->ro = 0;

	atomic_set(&mddev->writes_pending,0);
	atomic_set(&mddev->max_corr_read_errors,
		   MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
	mddev->safemode = 0;
	mddev->safemode_timer.function = md_safemode_timeout;
	mddev->safemode_timer.data = (unsigned long) mddev;
	mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
	mddev->in_sync = 1;
	smp_wmb();
	mddev->ready = 1;
	rdev_for_each(rdev, mddev)
		if (rdev->raid_disk >= 0)
			if (sysfs_link_rdev(mddev, rdev))
				/* failure here is OK */;

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);

	if (mddev->flags)
		md_update_sb(mddev, 0);

	md_new_event(mddev);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	sysfs_notify(&mddev->kobj, NULL, "degraded");
	return 0;
}
EXPORT_SYMBOL_GPL(md_run);

static int do_md_run(struct mddev *mddev)
{
	int err;

	err = md_run(mddev);
	if (err)
		goto out;
	err = bitmap_load(mddev);
	if (err) {
		bitmap_destroy(mddev);
		goto out;
	}

	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */

	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	mddev->changed = 1;
	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
	return err;
}

static int restart_array(struct mddev *mddev)
{
	struct gendisk *disk = mddev->gendisk;

	/* Complain if it has no devices */
	if (list_empty(&mddev->disks))
		return -ENXIO;
	if (!mddev->pers)
		return -EINVAL;
	if (!mddev->ro)
		return -EBUSY;
	mddev->safemode = 0;
	mddev->ro = 0;
	set_disk_ro(disk, 0);
	printk(KERN_INFO "md: %s switched to read-write mode.\n",
	       mdname(mddev));
	/* Kick recovery or resync if necessary */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	return 0;
}

/* similar to deny_write_access, but accounts for our holding a reference
 * to the file ourselves */
static int deny_bitmap_write_access(struct file * file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	if (atomic_read(&inode->i_writecount) > 1) {
		spin_unlock(&inode->i_lock);
		return -ETXTBSY;
	}
	atomic_set(&inode->i_writecount, -1);
	spin_unlock(&inode->i_lock);

	return 0;
}

void restore_bitmap_write_access(struct file *file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	atomic_set(&inode->i_writecount, 1);
	spin_unlock(&inode->i_lock);
}

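/* Reset all per-array state to defaults so the mddev can be reused. */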
static void md_clean(struct mddev *mddev)
{
	mddev->array_sectors = 0;
	mddev->external_size = 0;
	mddev->dev_sectors = 0;
	mddev->raid_disks = 0;
	mddev->recovery_cp = 0;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->reshape_position = MaxSector;
	mddev->external = 0;
	mddev->persistent = 0;
	mddev->level = LEVEL_NONE;
	mddev->clevel[0] = 0;
	mddev->flags = 0;
	mddev->ro = 0;
	mddev->metadata_type[0] = 0;
	mddev->chunk_sectors = 0;
	mddev->ctime = mddev->utime = 0;
	mddev->layout = 0;
	mddev->max_disks = 0;
	mddev->events = 0;
	mddev->can_decrease_events = 0;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
	mddev->new_level = LEVEL_NONE;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = 0;
	mddev->curr_resync = 0;
	mddev->resync_mismatches = 0;
	mddev->suspend_lo = mddev->suspend_hi = 0;
	mddev->sync_speed_min = mddev->sync_speed_max = 0;
	mddev->recovery = 0;
	mddev->in_sync = 0;
	mddev->changed = 0;
	mddev->degraded = 0;
	mddev->safemode = 0;
	mddev->merge_check_needed = 0;
	mddev->bitmap_info.offset = 0;
	mddev->bitmap_info.default_offset = 0;
	mddev->bitmap_info.default_space = 0;
	mddev->bitmap_info.chunksize = 0;
	mddev->bitmap_info.daemon_sleep = 0;
	mddev->bitmap_info.max_write_behind = 0;
}

static void __md_stop_writes(struct mddev *mddev)
{
	if (mddev->sync_thread) {
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		reap_sync_thread(mddev);
	}

	del_timer_sync(&mddev->safemode_timer);

	bitmap_flush(mddev);
	md_super_wait(mddev);

	if (!mddev->in_sync || mddev->flags) {
		/* mark array as shutdown cleanly */
		mddev->in_sync = 1;
		md_update_sb(mddev, 1);
	}
}

void md_stop_writes(struct mddev *mddev)
{
	mddev_lock(mddev);
	__md_stop_writes(mddev);
	mddev_unlock(mddev);
}
EXPORT_SYMBOL_GPL(md_stop_writes);

void md_stop(struct mddev *mddev)
{
	mddev->ready = 0;
	mddev->pers->stop(mddev);
	if (mddev->pers->sync_request && mddev->to_remove == NULL)
		mddev->to_remove = &md_redundancy_group;
	module_put(mddev->pers->owner);
	mddev->pers = NULL;
	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
EXPORT_SYMBOL_GPL(md_stop);

static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
{
	int err = 0;
	mutex_lock(&mddev->open_mutex);
	if (atomic_read(&mddev->openers) > !!bdev) {
		printk("md: %s still in use.\n", mdname(mddev));
		err = -EBUSY;
		goto out;
	}
	if (bdev)
		sync_blockdev(bdev);
	if (mddev->pers) {
		__md_stop_writes(mddev);

		err = -ENXIO;
		if (mddev->ro == 1)
			goto out;
		mddev->ro = 1;
		set_disk_ro(mddev->gendisk, 1);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		err = 0;
	}
out:
	mutex_unlock(&mddev->open_mutex);
	return err;
}

/* mode:
 *   0 - completely stop and disassemble array
 *   2 - stop but do not disassemble array
 */
static int do_md_stop(struct mddev * mddev, int mode,
		      struct block_device *bdev)
{
	struct gendisk *disk = mddev->gendisk;
	struct md_rdev *rdev;

	mutex_lock(&mddev->open_mutex);
	if (atomic_read(&mddev->openers) > !!bdev ||
	    mddev->sysfs_active) {
		printk("md: %s still in use.\n", mdname(mddev));
		mutex_unlock(&mddev->open_mutex);
		return -EBUSY;
	}
	if (bdev)
		/* It is possible IO was issued on some other
		 * open file which was closed before we took ->open_mutex.
		 * As that was not the last close __blkdev_put will not
		 * have called sync_blockdev, so we must.
		 */
		sync_blockdev(bdev);

	if (mddev->pers) {
		if (mddev->ro)
			set_disk_ro(disk, 0);

		__md_stop_writes(mddev);
		md_stop(mddev);
		mddev->queue->merge_bvec_fn = NULL;
		mddev->queue->backing_dev_info.congested_fn = NULL;

		/* tell userspace to handle 'inactive' */
		sysfs_notify_dirent_safe(mddev->sysfs_state);

		rdev_for_each(rdev, mddev)
			if (rdev->raid_disk >= 0)
				sysfs_unlink_rdev(mddev, rdev);

		set_capacity(disk, 0);
		mutex_unlock(&mddev->open_mutex);
		mddev->changed = 1;
		revalidate_disk(disk);

		if (mddev->ro)
			mddev->ro = 0;
	} else
		mutex_unlock(&mddev->open_mutex);
	/*
	 * Free resources if final stop
	 */
	if (mode == 0) {
		printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));

		bitmap_destroy(mddev);
		if (mddev->bitmap_info.file) {
			restore_bitmap_write_access(mddev->bitmap_info.file);
			fput(mddev->bitmap_info.file);
			mddev->bitmap_info.file = NULL;
		}
		mddev->bitmap_info.offset = 0;

		export_array(mddev);

		md_clean(mddev);
		kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
		if (mddev->hold_active == UNTIL_STOP)
			mddev->hold_active = 0;
	}
	blk_integrity_unregister(disk);
	md_new_event(mddev);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	return 0;
}

#ifndef MODULE
static void autorun_array(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int err;

	if (list_empty(&mddev->disks))
		return;

	printk(KERN_INFO "md: running: ");

	rdev_for_each(rdev, mddev) {
		char b[BDEVNAME_SIZE];
		printk("<%s>", bdevname(rdev->bdev,b));
	}
	printk("\n");

	err = do_md_run(mddev);
	if (err) {
		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
		do_md_stop(mddev, 0, NULL);
	}
}

/*
 * let's try to run arrays based on all disks that have arrived
 * until now. (those are in pending_raid_disks)
 *
 * the method: pick the first pending disk, collect all disks with
 * the same UUID, remove all from the pending list and put them into
 * the 'same_array' list. Then order this list based on superblock
 * update time (freshest comes first), kick out 'old' disks and
 * compare superblocks. If everything's fine then run it.
 *
 * If "unit" is allocated, then bump its reference count
 */
static void autorun_devices(int part)
{
	struct md_rdev *rdev0, *rdev, *tmp;
	struct mddev *mddev;
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "md: autorun ...\n");
	while (!list_empty(&pending_raid_disks)) {
		int unit;
		dev_t dev;
		LIST_HEAD(candidates);
		rdev0 = list_entry(pending_raid_disks.next,
				   struct md_rdev, same_set);

		printk(KERN_INFO "md: considering %s ...\n",
		       bdevname(rdev0->bdev,b));
		INIT_LIST_HEAD(&candidates);
		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
			if (super_90_load(rdev, rdev0, 0) >= 0) {
				printk(KERN_INFO "md: adding %s ...\n",
				       bdevname(rdev->bdev,b));
				list_move(&rdev->same_set, &candidates);
			}
		/*
		 * now we have a set of devices, with all of them having
		 * mostly sane superblocks. It's time to allocate the
		 * mddev.
		 */
		if (part) {
			dev = MKDEV(mdp_major,
				    rdev0->preferred_minor << MdpMinorShift);
			unit = MINOR(dev) >> MdpMinorShift;
		} else {
			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
			unit = MINOR(dev);
		}
		if (rdev0->preferred_minor != unit) {
			printk(KERN_INFO "md: unit number in %s is bad: %d\n",
			       bdevname(rdev0->bdev, b), rdev0->preferred_minor);
			break;
		}

		md_probe(dev, NULL, NULL);
		mddev = mddev_find(dev);
		if (!mddev || !mddev->gendisk) {
			if (mddev)
				mddev_put(mddev);
			printk(KERN_ERR
			       "md: cannot allocate memory for md drive.\n");
			break;
		}
		if (mddev_lock(mddev))
			printk(KERN_WARNING "md: %s locked, cannot run\n",
			       mdname(mddev));
		else if (mddev->raid_disks || mddev->major_version
			 || !list_empty(&mddev->disks)) {
			printk(KERN_WARNING
			       "md: %s already running, cannot run %s\n",
			       mdname(mddev), bdevname(rdev0->bdev,b));
			mddev_unlock(mddev);
		} else {
			printk(KERN_INFO "md: created %s\n", mdname(mddev));
			mddev->persistent = 1;
			rdev_for_each_list(rdev, tmp, &candidates) {
				list_del_init(&rdev->same_set);
				if (bind_rdev_to_array(rdev, mddev))
					export_rdev(rdev);
			}
			autorun_array(mddev);
			mddev_unlock(mddev);
		}
		/* on success, candidates will be empty, on error
		 * it won't...
		 */
		rdev_for_each_list(rdev, tmp, &candidates) {
			list_del_init(&rdev->same_set);
			export_rdev(rdev);
		}
		mddev_put(mddev);
	}
	printk(KERN_INFO "md: ... autorun DONE.\n");
}
#endif /* !MODULE */

static int get_version(void __user * arg)
{
	mdu_version_t ver;

	ver.major = MD_MAJOR_VERSION;
	ver.minor = MD_MINOR_VERSION;
	ver.patchlevel = MD_PATCHLEVEL_VERSION;

	if (copy_to_user(arg, &ver, sizeof(ver)))
		return -EFAULT;

	return 0;
}

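/*
 * GET_ARRAY_INFO: summarise the array for userspace.  Note that
 * info.size below is reported in KiB (dev_sectors/2) and is set to -1
 * if the value overflows the field.
 */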
static int get_array_info(struct mddev * mddev, void __user * arg)
{
	mdu_array_info_t info;
	int nr, working, insync, failed, spare;
	struct md_rdev *rdev;

	nr = working = insync = failed = spare = 0;
	rdev_for_each(rdev, mddev) {
		nr++;
		if (test_bit(Faulty, &rdev->flags))
			failed++;
		else {
			working++;
			if (test_bit(In_sync, &rdev->flags))
				insync++;
			else
				spare++;
		}
	}

	info.major_version = mddev->major_version;
	info.minor_version = mddev->minor_version;
	info.patch_version = MD_PATCHLEVEL_VERSION;
	info.ctime         = mddev->ctime;
	info.level         = mddev->level;
	info.size          = mddev->dev_sectors / 2;
	if (info.size != mddev->dev_sectors / 2) /* overflow */
		info.size = -1;
	info.nr_disks      = nr;
	info.raid_disks    = mddev->raid_disks;
	info.md_minor      = mddev->md_minor;
	info.not_persistent= !mddev->persistent;

	info.utime         = mddev->utime;
	info.state         = 0;
	if (mddev->in_sync)
		info.state = (1<<MD_SB_CLEAN);
	if (mddev->bitmap && mddev->bitmap_info.offset)
		info.state |= (1<<MD_SB_BITMAP_PRESENT);
	info.active_disks  = insync;
	info.working_disks = working;
	info.failed_disks  = failed;
	info.spare_disks   = spare;

	info.layout        = mddev->layout;
	info.chunk_size    = mddev->chunk_sectors << 9;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static int get_bitmap_file(struct mddev * mddev, void __user * arg)
{
	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
	char *ptr, *buf = NULL;
	int err = -ENOMEM;

	if (md_allow_write(mddev))
		file = kmalloc(sizeof(*file), GFP_NOIO);
	else
		file = kmalloc(sizeof(*file), GFP_KERNEL);

	if (!file)
		goto out;

	/* bitmap disabled, zero the first byte and copy out */
	if (!mddev->bitmap || !mddev->bitmap->storage.file) {
		file->pathname[0] = '\0';
		goto copy_out;
	}

	buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
	if (!buf)
		goto out;

	ptr = d_path(&mddev->bitmap->storage.file->f_path,
		     buf, sizeof(file->pathname));
	if (IS_ERR(ptr))
		goto out;

	strcpy(file->pathname, ptr);

copy_out:
	err = 0;
	if (copy_to_user(arg, file, sizeof(*file)))
		err = -EFAULT;
out:
	kfree(buf);
	kfree(file);
	return err;
}

static int get_disk_info(struct mddev * mddev, void __user * arg)
{
	mdu_disk_info_t info;
	struct md_rdev *rdev;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	rdev = find_rdev_nr(mddev, info.number);
	if (rdev) {
		info.major = MAJOR(rdev->bdev->bd_dev);
		info.minor = MINOR(rdev->bdev->bd_dev);
		info.raid_disk = rdev->raid_disk;
		info.state = 0;
		if (test_bit(Faulty, &rdev->flags))
			info.state |= (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev->flags)) {
			info.state |= (1<<MD_DISK_ACTIVE);
			info.state |= (1<<MD_DISK_SYNC);
		}
		if (test_bit(WriteMostly, &rdev->flags))
			info.state |= (1<<MD_DISK_WRITEMOSTLY);
	} else {
		info.major = info.minor = 0;
		info.raid_disk = -1;
		info.state = (1<<MD_DISK_REMOVED);
	}

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

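/*
 * add_new_disk covers three distinct cases: assembling a not-yet-started
 * array (superblocks are loaded and cross-checked against the first
 * disk), hot-adding a device to a running array, and populating a 0.90
 * or non-persistent array purely from the descriptor supplied by
 * userspace.
 */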
static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	struct md_rdev *rdev;
	dev_t dev = MKDEV(info->major,info->minor);

	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
		return -EOVERFLOW;

	if (!mddev->raid_disks) {
		int err;
		/* expecting a device which has a superblock */
		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
			       "md: md_import_device returned %ld\n",
			       PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		if (!list_empty(&mddev->disks)) {
			struct md_rdev *rdev0
				= list_entry(mddev->disks.next,
					     struct md_rdev, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0) {
				printk(KERN_WARNING
				       "md: %s has different UUID to %s\n",
				       bdevname(rdev->bdev,b),
				       bdevname(rdev0->bdev,b2));
				export_rdev(rdev);
				return -EINVAL;
			}
		}
		err = bind_rdev_to_array(rdev, mddev);
		if (err)
			export_rdev(rdev);
		return err;
	}

	/*
	 * add_new_disk can be used once the array is assembled
	 * to add "hot spares".  They must already have a superblock
	 * written
	 */
	if (mddev->pers) {
		int err;
		if (!mddev->pers->hot_add_disk) {
			printk(KERN_WARNING
			       "%s: personality does not support diskops!\n",
			       mdname(mddev));
			return -EINVAL;
		}
		if (mddev->persistent)
			rdev = md_import_device(dev, mddev->major_version,
						mddev->minor_version);
		else
			rdev = md_import_device(dev, -1, -1);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
			       "md: md_import_device returned %ld\n",
			       PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		/* set saved_raid_disk if appropriate */
		if (!mddev->persistent) {
			if (info->state & (1<<MD_DISK_SYNC) &&
			    info->raid_disk < mddev->raid_disks) {
				rdev->raid_disk = info->raid_disk;
				set_bit(In_sync, &rdev->flags);
			} else
				rdev->raid_disk = -1;
		} else
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
		if ((info->state & (1<<MD_DISK_SYNC)) &&
		    rdev->raid_disk != info->raid_disk) {
			/* This was a hot-add request, but events doesn't
			 * match, so reject it.
			 */
			export_rdev(rdev);
			return -EINVAL;
		}

		if (test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = rdev->raid_disk;
		else
			rdev->saved_raid_disk = -1;

		clear_bit(In_sync, &rdev->flags); /* just to be sure */
		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
		else
			clear_bit(WriteMostly, &rdev->flags);

		rdev->raid_disk = -1;
		err = bind_rdev_to_array(rdev, mddev);
		if (!err && !mddev->pers->hot_remove_disk) {
			/* If there is hot_add_disk but no hot_remove_disk
			 * then added disks for geometry changes,
			 * and should be added immediately.
			 */
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
			err = mddev->pers->hot_add_disk(mddev, rdev);
			if (err)
				unbind_rdev_from_array(rdev);
		}
		if (err)
			export_rdev(rdev);
		else
			sysfs_notify_dirent_safe(rdev->sysfs_state);

		md_update_sb(mddev, 1);
		if (mddev->degraded)
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		if (!err)
			md_new_event(mddev);
		md_wakeup_thread(mddev->thread);
		return err;
	}

	/* otherwise, add_new_disk is only allowed
	 * for major_version==0 superblocks
	 */
	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
		       mdname(mddev));
		return -EINVAL;
	}

	if (!(info->state & (1<<MD_DISK_FAULTY))) {
		int err;
		rdev = md_import_device(dev, -1, 0);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
			       "md: error, md_import_device() returned %ld\n",
			       PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		rdev->desc_nr = info->number;
		if (info->raid_disk < mddev->raid_disks)
			rdev->raid_disk = info->raid_disk;
		else
			rdev->raid_disk = -1;

		if (rdev->raid_disk < mddev->raid_disks)
			if (info->state & (1<<MD_DISK_SYNC))
				set_bit(In_sync, &rdev->flags);

		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);

		if (!mddev->persistent) {
			printk(KERN_INFO "md: nonpersistent superblock ...\n");
			rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
		} else
			rdev->sb_start = calc_dev_sboffset(rdev);
		rdev->sectors = rdev->sb_start;

		err = bind_rdev_to_array(rdev, mddev);
		if (err) {
			export_rdev(rdev);
			return err;
		}
	}

	return 0;
}

static int hot_remove_disk(struct mddev * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	struct md_rdev *rdev;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENXIO;

	if (rdev->raid_disk >= 0)
		goto busy;

	kick_rdev_from_array(rdev);
	md_update_sb(mddev, 1);
	md_new_event(mddev);

	return 0;
busy:
	printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
	       bdevname(rdev->bdev,b), mdname(mddev));
	return -EBUSY;
}

static int hot_add_disk(struct mddev * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	int err;
	struct md_rdev *rdev;

	if (!mddev->pers)
		return -ENODEV;

	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: HOT_ADD may only be used with"
		       " version-0 superblocks.\n",
		       mdname(mddev));
		return -EINVAL;
	}
	if (!mddev->pers->hot_add_disk) {
		printk(KERN_WARNING
		       "%s: personality does not support diskops!\n",
		       mdname(mddev));
		return -EINVAL;
	}

	rdev = md_import_device(dev, -1, 0);
	if (IS_ERR(rdev)) {
		printk(KERN_WARNING
		       "md: error, md_import_device() returned %ld\n",
		       PTR_ERR(rdev));
		return -EINVAL;
	}

	if (mddev->persistent)
		rdev->sb_start = calc_dev_sboffset(rdev);
	else
		rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;

	rdev->sectors = rdev->sb_start;

	if (test_bit(Faulty, &rdev->flags)) {
		printk(KERN_WARNING
		       "md: can not hot-add faulty %s disk to %s!\n",
		       bdevname(rdev->bdev,b), mdname(mddev));
		err = -EINVAL;
		goto abort_export;
	}
	clear_bit(In_sync, &rdev->flags);
	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	err = bind_rdev_to_array(rdev, mddev);
	if (err)
		goto abort_export;

	/*
	 * The rest should better be atomic, we can have disk failures
	 * noticed in interrupt contexts ...
	 */
	rdev->raid_disk = -1;

	md_update_sb(mddev, 1);

	/*
	 * Kick recovery, maybe this spare has to be added to the
	 * array immediately.
	 */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event(mddev);
	return 0;

abort_export:
	export_rdev(rdev);
	return err;
}

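/*
 * set_bitmap_file() attaches (fd >= 0) or detaches (fd < 0) a
 * file-backed write-intent bitmap.  On a live array the personality is
 * quiesced around bitmap creation or destruction so it always sees a
 * consistent state.
 */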
static int set_bitmap_file(struct mddev *mddev, int fd)
{
	int err;

	if (mddev->pers) {
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		/* we should be able to change the bitmap.. */
	}

	if (fd >= 0) {
		if (mddev->bitmap)
			return -EEXIST; /* cannot add when bitmap is present */
		mddev->bitmap_info.file = fget(fd);

		if (mddev->bitmap_info.file == NULL) {
			printk(KERN_ERR "%s: error: failed to get bitmap file\n",
			       mdname(mddev));
			return -EBADF;
		}

		err = deny_bitmap_write_access(mddev->bitmap_info.file);
		if (err) {
			printk(KERN_ERR "%s: error: bitmap file is already in use\n",
			       mdname(mddev));
			fput(mddev->bitmap_info.file);
			mddev->bitmap_info.file = NULL;
			return err;
		}
		mddev->bitmap_info.offset = 0; /* file overrides offset */
	} else if (mddev->bitmap == NULL)
		return -ENOENT; /* cannot remove what isn't there */
	err = 0;
	if (mddev->pers) {
		mddev->pers->quiesce(mddev, 1);
		if (fd >= 0) {
			err = bitmap_create(mddev);
			if (!err)
				err = bitmap_load(mddev);
		}
		if (fd < 0 || err) {
			bitmap_destroy(mddev);
			fd = -1; /* make sure to put the file */
		}
		mddev->pers->quiesce(mddev, 0);
	}
	if (fd < 0) {
		if (mddev->bitmap_info.file) {
			restore_bitmap_write_access(mddev->bitmap_info.file);
			fput(mddev->bitmap_info.file);
		}
		mddev->bitmap_info.file = NULL;
	}

	return err;
}

/*
 * set_array_info is used two different ways
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent, layout and chunksize determine the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 * In this case raid_disks will be 0, and the major_version field is
 * used to determine which style super-blocks are to be found on the devices.
 * The minor and patch _version numbers are also kept in case the
 * super_block handler wishes to interpret them.
 */
  5384. static int set_array_info(struct mddev * mddev, mdu_array_info_t *info)
  5385. {
  5386. if (info->raid_disks == 0) {
  5387. /* just setting version number for superblock loading */
  5388. if (info->major_version < 0 ||
  5389. info->major_version >= ARRAY_SIZE(super_types) ||
  5390. super_types[info->major_version].name == NULL) {
  5391. /* maybe try to auto-load a module? */
  5392. printk(KERN_INFO
  5393. "md: superblock version %d not known\n",
  5394. info->major_version);
  5395. return -EINVAL;
  5396. }
  5397. mddev->major_version = info->major_version;
  5398. mddev->minor_version = info->minor_version;
  5399. mddev->patch_version = info->patch_version;
  5400. mddev->persistent = !info->not_persistent;
  5401. /* ensure mddev_put doesn't delete this now that there
  5402. * is some minimal configuration.
  5403. */
  5404. mddev->ctime = get_seconds();
  5405. return 0;
  5406. }
  5407. mddev->major_version = MD_MAJOR_VERSION;
  5408. mddev->minor_version = MD_MINOR_VERSION;
  5409. mddev->patch_version = MD_PATCHLEVEL_VERSION;
  5410. mddev->ctime = get_seconds();
  5411. mddev->level = info->level;
  5412. mddev->clevel[0] = 0;
  5413. mddev->dev_sectors = 2 * (sector_t)info->size;
  5414. mddev->raid_disks = info->raid_disks;
  5415. /* don't set md_minor, it is determined by which /dev/md* was
  5416. * openned
  5417. */
  5418. if (info->state & (1<<MD_SB_CLEAN))
  5419. mddev->recovery_cp = MaxSector;
  5420. else
  5421. mddev->recovery_cp = 0;
  5422. mddev->persistent = ! info->not_persistent;
  5423. mddev->external = 0;
  5424. mddev->layout = info->layout;
  5425. mddev->chunk_sectors = info->chunk_size >> 9;
  5426. mddev->max_disks = MD_SB_DISKS;
  5427. if (mddev->persistent)
  5428. mddev->flags = 0;
  5429. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  5430. mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
  5431. mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
  5432. mddev->bitmap_info.offset = 0;
  5433. mddev->reshape_position = MaxSector;
  5434. /*
  5435. * Generate a 128 bit UUID
  5436. */
  5437. get_random_bytes(mddev->uuid, 16);
  5438. mddev->new_level = mddev->level;
  5439. mddev->new_chunk_sectors = mddev->chunk_sectors;
  5440. mddev->new_layout = mddev->layout;
  5441. mddev->delta_disks = 0;
  5442. mddev->reshape_backwards = 0;
  5443. return 0;
  5444. }
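/*
 * Illustrative user-space sketch (not part of md.c): the two ways the
 * SET_ARRAY_INFO ioctl handled by set_array_info() above can be driven.
 * The field values are invented; mdu_array_info_t and the ioctl number
 * come from <linux/raid/md_u.h>.
 */
#if 0 /* user-space example, excluded from the kernel build */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int create_raid1(int fd)
{
	mdu_array_info_t info;

	memset(&info, 0, sizeof(info));
	info.level = 1;			/* RAID1 */
	info.raid_disks = 2;		/* > 0 selects the "create" path */
	info.size = 0;			/* 0: use whole devices */
	return ioctl(fd, SET_ARRAY_INFO, &info);
}

static int assemble_v1(int fd)
{
	mdu_array_info_t info;

	memset(&info, 0, sizeof(info));
	info.major_version = 1;		/* raid_disks == 0: just select the */
	info.raid_disks = 0;		/* superblock version for loading   */
	return ioctl(fd, SET_ARRAY_INFO, &info);
}
#endif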
  5445. void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
  5446. {
  5447. WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
  5448. if (mddev->external_size)
  5449. return;
  5450. mddev->array_sectors = array_sectors;
  5451. }
  5452. EXPORT_SYMBOL(md_set_array_sectors);
  5453. static int update_size(struct mddev *mddev, sector_t num_sectors)
  5454. {
  5455. struct md_rdev *rdev;
  5456. int rv;
  5457. int fit = (num_sectors == 0);
  5458. if (mddev->pers->resize == NULL)
  5459. return -EINVAL;
  5460. /* The "num_sectors" is the number of sectors of each device that
  5461. * is used. This can only make sense for arrays with redundancy.
  5462. * linear and raid0 always use whatever space is available. We can only
  5463. * consider changing this number if no resync or reconstruction is
  5464. * happening, and if the new size is acceptable. It must fit before the
  5465. * sb_start or, if that is <data_offset, it must fit before the size
  5466. * of each device. If num_sectors is zero, we find the largest size
  5467. * that fits.
  5468. */
  5469. if (mddev->sync_thread)
  5470. return -EBUSY;
  5471. rdev_for_each(rdev, mddev) {
  5472. sector_t avail = rdev->sectors;
  5473. if (fit && (num_sectors == 0 || num_sectors > avail))
  5474. num_sectors = avail;
  5475. if (avail < num_sectors)
  5476. return -ENOSPC;
  5477. }
  5478. rv = mddev->pers->resize(mddev, num_sectors);
  5479. if (!rv)
  5480. revalidate_disk(mddev->gendisk);
  5481. return rv;
  5482. }
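/*
 * Worked example (illustration only): with num_sectors == 0 ("fit"),
 * the rdev loop in update_size() above settles on the largest size that
 * fits every device, i.e. the minimum of the per-device available
 * sectors. The numbers below are made up.
 */
#if 0 /* standalone user-space demonstration of the loop's effect */
#include <stdio.h>

int main(void)
{
	unsigned long long avail[] = { 1000000, 999424, 1000448 };
	unsigned long long num_sectors = 0;
	int fit = (num_sectors == 0);
	int i;

	for (i = 0; i < 3; i++) {
		if (fit && (num_sectors == 0 || num_sectors > avail[i]))
			num_sectors = avail[i];
		if (avail[i] < num_sectors)
			return 1;	/* -ENOSPC in the kernel */
	}
	printf("resize target: %llu sectors\n", num_sectors); /* 999424 */
	return 0;
}
#endif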
  5483. static int update_raid_disks(struct mddev *mddev, int raid_disks)
  5484. {
  5485. int rv;
  5486. struct md_rdev *rdev;
  5487. /* change the number of raid disks */
  5488. if (mddev->pers->check_reshape == NULL)
  5489. return -EINVAL;
  5490. if (raid_disks <= 0 ||
  5491. (mddev->max_disks && raid_disks >= mddev->max_disks))
  5492. return -EINVAL;
  5493. if (mddev->sync_thread || mddev->reshape_position != MaxSector)
  5494. return -EBUSY;
  5495. rdev_for_each(rdev, mddev) {
  5496. if (mddev->raid_disks < raid_disks &&
  5497. rdev->data_offset < rdev->new_data_offset)
  5498. return -EINVAL;
  5499. if (mddev->raid_disks > raid_disks &&
  5500. rdev->data_offset > rdev->new_data_offset)
  5501. return -EINVAL;
  5502. }
  5503. mddev->delta_disks = raid_disks - mddev->raid_disks;
  5504. if (mddev->delta_disks < 0)
  5505. mddev->reshape_backwards = 1;
  5506. else if (mddev->delta_disks > 0)
  5507. mddev->reshape_backwards = 0;
  5508. rv = mddev->pers->check_reshape(mddev);
  5509. if (rv < 0) {
  5510. mddev->delta_disks = 0;
  5511. mddev->reshape_backwards = 0;
  5512. }
  5513. return rv;
  5514. }
  5515. /*
  5516. * update_array_info is used to change the configuration of an
  5517. * on-line array.
5518. * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
  5519. * fields in the info are checked against the array.
  5520. * Any differences that cannot be handled will cause an error.
  5521. * Normally, only one change can be managed at a time.
  5522. */
  5523. static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
  5524. {
  5525. int rv = 0;
  5526. int cnt = 0;
  5527. int state = 0;
5528. /* calculate expected state, ignoring low bits */
  5529. if (mddev->bitmap && mddev->bitmap_info.offset)
  5530. state |= (1 << MD_SB_BITMAP_PRESENT);
  5531. if (mddev->major_version != info->major_version ||
  5532. mddev->minor_version != info->minor_version ||
  5533. /* mddev->patch_version != info->patch_version || */
  5534. mddev->ctime != info->ctime ||
  5535. mddev->level != info->level ||
  5536. /* mddev->layout != info->layout || */
  5537. !mddev->persistent != info->not_persistent||
  5538. mddev->chunk_sectors != info->chunk_size >> 9 ||
  5539. /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
  5540. ((state^info->state) & 0xfffffe00)
  5541. )
  5542. return -EINVAL;
  5543. /* Check there is only one change */
  5544. if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
  5545. cnt++;
  5546. if (mddev->raid_disks != info->raid_disks)
  5547. cnt++;
  5548. if (mddev->layout != info->layout)
  5549. cnt++;
  5550. if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
  5551. cnt++;
  5552. if (cnt == 0)
  5553. return 0;
  5554. if (cnt > 1)
  5555. return -EINVAL;
  5556. if (mddev->layout != info->layout) {
  5557. /* Change layout
  5558. * we don't need to do anything at the md level, the
  5559. * personality will take care of it all.
  5560. */
  5561. if (mddev->pers->check_reshape == NULL)
  5562. return -EINVAL;
  5563. else {
  5564. mddev->new_layout = info->layout;
  5565. rv = mddev->pers->check_reshape(mddev);
  5566. if (rv)
  5567. mddev->new_layout = mddev->layout;
  5568. return rv;
  5569. }
  5570. }
  5571. if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
  5572. rv = update_size(mddev, (sector_t)info->size * 2);
  5573. if (mddev->raid_disks != info->raid_disks)
  5574. rv = update_raid_disks(mddev, info->raid_disks);
  5575. if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
  5576. if (mddev->pers->quiesce == NULL)
  5577. return -EINVAL;
  5578. if (mddev->recovery || mddev->sync_thread)
  5579. return -EBUSY;
  5580. if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
  5581. /* add the bitmap */
  5582. if (mddev->bitmap)
  5583. return -EEXIST;
  5584. if (mddev->bitmap_info.default_offset == 0)
  5585. return -EINVAL;
  5586. mddev->bitmap_info.offset =
  5587. mddev->bitmap_info.default_offset;
  5588. mddev->bitmap_info.space =
  5589. mddev->bitmap_info.default_space;
  5590. mddev->pers->quiesce(mddev, 1);
  5591. rv = bitmap_create(mddev);
  5592. if (!rv)
  5593. rv = bitmap_load(mddev);
  5594. if (rv)
  5595. bitmap_destroy(mddev);
  5596. mddev->pers->quiesce(mddev, 0);
  5597. } else {
  5598. /* remove the bitmap */
  5599. if (!mddev->bitmap)
  5600. return -ENOENT;
  5601. if (mddev->bitmap->storage.file)
  5602. return -EINVAL;
  5603. mddev->pers->quiesce(mddev, 1);
  5604. bitmap_destroy(mddev);
  5605. mddev->pers->quiesce(mddev, 0);
  5606. mddev->bitmap_info.offset = 0;
  5607. }
  5608. }
  5609. md_update_sb(mddev, 1);
  5610. return rv;
  5611. }
  5612. static int set_disk_faulty(struct mddev *mddev, dev_t dev)
  5613. {
  5614. struct md_rdev *rdev;
  5615. if (mddev->pers == NULL)
  5616. return -ENODEV;
  5617. rdev = find_rdev(mddev, dev);
  5618. if (!rdev)
  5619. return -ENODEV;
  5620. md_error(mddev, rdev);
  5621. if (!test_bit(Faulty, &rdev->flags))
  5622. return -EBUSY;
  5623. return 0;
  5624. }
  5625. /*
5626. * We have a problem here: there is no easy way to give a CHS
5627. * virtual geometry. We currently pretend that we have 2 heads and
5628. * 4 sectors per track (with a BIG number of cylinders...). This
5629. * drives dosfs just mad... ;-)
  5630. */
  5631. static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  5632. {
  5633. struct mddev *mddev = bdev->bd_disk->private_data;
  5634. geo->heads = 2;
  5635. geo->sectors = 4;
  5636. geo->cylinders = mddev->array_sectors / 8;
  5637. return 0;
  5638. }
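/*
 * Worked example (illustration only): with the fake 2-head, 4-sector
 * geometry above, each "cylinder" is 8 sectors (4 KiB), so a 1 TiB
 * array (2147483648 sectors) reports 268435456 cylinders.
 */
#if 0 /* standalone user-space check of the arithmetic */
#include <stdio.h>

int main(void)
{
	unsigned long long array_sectors = 2147483648ULL; /* 1 TiB / 512 */

	printf("heads=2 sectors=4 cylinders=%llu\n", array_sectors / 8);
	return 0;
}
#endif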
  5639. static int md_ioctl(struct block_device *bdev, fmode_t mode,
  5640. unsigned int cmd, unsigned long arg)
  5641. {
  5642. int err = 0;
  5643. void __user *argp = (void __user *)arg;
  5644. struct mddev *mddev = NULL;
  5645. int ro;
  5646. switch (cmd) {
  5647. case RAID_VERSION:
  5648. case GET_ARRAY_INFO:
  5649. case GET_DISK_INFO:
  5650. break;
  5651. default:
  5652. if (!capable(CAP_SYS_ADMIN))
  5653. return -EACCES;
  5654. }
  5655. /*
  5656. * Commands dealing with the RAID driver but not any
  5657. * particular array:
  5658. */
  5659. switch (cmd)
  5660. {
  5661. case RAID_VERSION:
  5662. err = get_version(argp);
  5663. goto done;
  5664. case PRINT_RAID_DEBUG:
  5665. err = 0;
  5666. md_print_devices();
  5667. goto done;
  5668. #ifndef MODULE
  5669. case RAID_AUTORUN:
  5670. err = 0;
  5671. autostart_arrays(arg);
  5672. goto done;
  5673. #endif
  5674. default:;
  5675. }
  5676. /*
  5677. * Commands creating/starting a new array:
  5678. */
  5679. mddev = bdev->bd_disk->private_data;
  5680. if (!mddev) {
  5681. BUG();
  5682. goto abort;
  5683. }
  5684. err = mddev_lock(mddev);
  5685. if (err) {
  5686. printk(KERN_INFO
  5687. "md: ioctl lock interrupted, reason %d, cmd %d\n",
  5688. err, cmd);
  5689. goto abort;
  5690. }
  5691. switch (cmd)
  5692. {
  5693. case SET_ARRAY_INFO:
  5694. {
  5695. mdu_array_info_t info;
  5696. if (!arg)
  5697. memset(&info, 0, sizeof(info));
  5698. else if (copy_from_user(&info, argp, sizeof(info))) {
  5699. err = -EFAULT;
  5700. goto abort_unlock;
  5701. }
  5702. if (mddev->pers) {
  5703. err = update_array_info(mddev, &info);
  5704. if (err) {
  5705. printk(KERN_WARNING "md: couldn't update"
  5706. " array info. %d\n", err);
  5707. goto abort_unlock;
  5708. }
  5709. goto done_unlock;
  5710. }
  5711. if (!list_empty(&mddev->disks)) {
  5712. printk(KERN_WARNING
  5713. "md: array %s already has disks!\n",
  5714. mdname(mddev));
  5715. err = -EBUSY;
  5716. goto abort_unlock;
  5717. }
  5718. if (mddev->raid_disks) {
  5719. printk(KERN_WARNING
  5720. "md: array %s already initialised!\n",
  5721. mdname(mddev));
  5722. err = -EBUSY;
  5723. goto abort_unlock;
  5724. }
  5725. err = set_array_info(mddev, &info);
  5726. if (err) {
  5727. printk(KERN_WARNING "md: couldn't set"
  5728. " array info. %d\n", err);
  5729. goto abort_unlock;
  5730. }
  5731. }
  5732. goto done_unlock;
  5733. default:;
  5734. }
  5735. /*
  5736. * Commands querying/configuring an existing array:
  5737. */
  5738. /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
  5739. * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
  5740. if ((!mddev->raid_disks && !mddev->external)
  5741. && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
  5742. && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
  5743. && cmd != GET_BITMAP_FILE) {
  5744. err = -ENODEV;
  5745. goto abort_unlock;
  5746. }
  5747. /*
  5748. * Commands even a read-only array can execute:
  5749. */
  5750. switch (cmd)
  5751. {
  5752. case GET_ARRAY_INFO:
  5753. err = get_array_info(mddev, argp);
  5754. goto done_unlock;
  5755. case GET_BITMAP_FILE:
  5756. err = get_bitmap_file(mddev, argp);
  5757. goto done_unlock;
  5758. case GET_DISK_INFO:
  5759. err = get_disk_info(mddev, argp);
  5760. goto done_unlock;
  5761. case RESTART_ARRAY_RW:
  5762. err = restart_array(mddev);
  5763. goto done_unlock;
  5764. case STOP_ARRAY:
  5765. err = do_md_stop(mddev, 0, bdev);
  5766. goto done_unlock;
  5767. case STOP_ARRAY_RO:
  5768. err = md_set_readonly(mddev, bdev);
  5769. goto done_unlock;
  5770. case BLKROSET:
  5771. if (get_user(ro, (int __user *)(arg))) {
  5772. err = -EFAULT;
  5773. goto done_unlock;
  5774. }
  5775. err = -EINVAL;
  5776. /* if the bdev is going readonly the value of mddev->ro
  5777. * does not matter, no writes are coming
  5778. */
  5779. if (ro)
  5780. goto done_unlock;
5781. /* are we already prepared for writes? */
  5782. if (mddev->ro != 1)
  5783. goto done_unlock;
5784. /* transitioning to read-auto need only happen for
  5785. * arrays that call md_write_start
  5786. */
  5787. if (mddev->pers) {
  5788. err = restart_array(mddev);
  5789. if (err == 0) {
  5790. mddev->ro = 2;
  5791. set_disk_ro(mddev->gendisk, 0);
  5792. }
  5793. }
  5794. goto done_unlock;
  5795. }
  5796. /*
  5797. * The remaining ioctls are changing the state of the
  5798. * superblock, so we do not allow them on read-only arrays.
  5799. * However non-MD ioctls (e.g. get-size) will still come through
  5800. * here and hit the 'default' below, so only disallow
  5801. * 'md' ioctls, and switch to rw mode if started auto-readonly.
  5802. */
  5803. if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
  5804. if (mddev->ro == 2) {
  5805. mddev->ro = 0;
  5806. sysfs_notify_dirent_safe(mddev->sysfs_state);
  5807. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  5808. md_wakeup_thread(mddev->thread);
  5809. } else {
  5810. err = -EROFS;
  5811. goto abort_unlock;
  5812. }
  5813. }
  5814. switch (cmd)
  5815. {
  5816. case ADD_NEW_DISK:
  5817. {
  5818. mdu_disk_info_t info;
  5819. if (copy_from_user(&info, argp, sizeof(info)))
  5820. err = -EFAULT;
  5821. else
  5822. err = add_new_disk(mddev, &info);
  5823. goto done_unlock;
  5824. }
  5825. case HOT_REMOVE_DISK:
  5826. err = hot_remove_disk(mddev, new_decode_dev(arg));
  5827. goto done_unlock;
  5828. case HOT_ADD_DISK:
  5829. err = hot_add_disk(mddev, new_decode_dev(arg));
  5830. goto done_unlock;
  5831. case SET_DISK_FAULTY:
  5832. err = set_disk_faulty(mddev, new_decode_dev(arg));
  5833. goto done_unlock;
  5834. case RUN_ARRAY:
  5835. err = do_md_run(mddev);
  5836. goto done_unlock;
  5837. case SET_BITMAP_FILE:
  5838. err = set_bitmap_file(mddev, (int)arg);
  5839. goto done_unlock;
  5840. default:
  5841. err = -EINVAL;
  5842. goto abort_unlock;
  5843. }
  5844. done_unlock:
  5845. abort_unlock:
  5846. if (mddev->hold_active == UNTIL_IOCTL &&
  5847. err != -EINVAL)
  5848. mddev->hold_active = 0;
  5849. mddev_unlock(mddev);
  5850. return err;
  5851. done:
  5852. if (err)
  5853. MD_BUG();
  5854. abort:
  5855. return err;
  5856. }
  5857. #ifdef CONFIG_COMPAT
  5858. static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
  5859. unsigned int cmd, unsigned long arg)
  5860. {
  5861. switch (cmd) {
  5862. case HOT_REMOVE_DISK:
  5863. case HOT_ADD_DISK:
  5864. case SET_DISK_FAULTY:
  5865. case SET_BITMAP_FILE:
  5866. /* These take in integer arg, do not convert */
  5867. break;
  5868. default:
  5869. arg = (unsigned long)compat_ptr(arg);
  5870. break;
  5871. }
  5872. return md_ioctl(bdev, mode, cmd, arg);
  5873. }
  5874. #endif /* CONFIG_COMPAT */
  5875. static int md_open(struct block_device *bdev, fmode_t mode)
  5876. {
  5877. /*
  5878. * Succeed if we can lock the mddev, which confirms that
  5879. * it isn't being stopped right now.
  5880. */
  5881. struct mddev *mddev = mddev_find(bdev->bd_dev);
  5882. int err;
  5883. if (!mddev)
  5884. return -ENODEV;
  5885. if (mddev->gendisk != bdev->bd_disk) {
  5886. /* we are racing with mddev_put which is discarding this
  5887. * bd_disk.
  5888. */
  5889. mddev_put(mddev);
  5890. /* Wait until bdev->bd_disk is definitely gone */
  5891. flush_workqueue(md_misc_wq);
  5892. /* Then retry the open from the top */
  5893. return -ERESTARTSYS;
  5894. }
  5895. BUG_ON(mddev != bdev->bd_disk->private_data);
  5896. if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
  5897. goto out;
  5898. err = 0;
  5899. atomic_inc(&mddev->openers);
  5900. mutex_unlock(&mddev->open_mutex);
  5901. check_disk_change(bdev);
  5902. out:
  5903. return err;
  5904. }
  5905. static int md_release(struct gendisk *disk, fmode_t mode)
  5906. {
  5907. struct mddev *mddev = disk->private_data;
  5908. BUG_ON(!mddev);
  5909. atomic_dec(&mddev->openers);
  5910. mddev_put(mddev);
  5911. return 0;
  5912. }
  5913. static int md_media_changed(struct gendisk *disk)
  5914. {
  5915. struct mddev *mddev = disk->private_data;
  5916. return mddev->changed;
  5917. }
  5918. static int md_revalidate(struct gendisk *disk)
  5919. {
  5920. struct mddev *mddev = disk->private_data;
  5921. mddev->changed = 0;
  5922. return 0;
  5923. }
  5924. static const struct block_device_operations md_fops =
  5925. {
  5926. .owner = THIS_MODULE,
  5927. .open = md_open,
  5928. .release = md_release,
  5929. .ioctl = md_ioctl,
  5930. #ifdef CONFIG_COMPAT
  5931. .compat_ioctl = md_compat_ioctl,
  5932. #endif
  5933. .getgeo = md_getgeo,
  5934. .media_changed = md_media_changed,
  5935. .revalidate_disk= md_revalidate,
  5936. };
  5937. static int md_thread(void * arg)
  5938. {
  5939. struct md_thread *thread = arg;
  5940. /*
5941. * md_thread is a 'system-thread'; its priority should be very
  5942. * high. We avoid resource deadlocks individually in each
  5943. * raid personality. (RAID5 does preallocation) We also use RR and
  5944. * the very same RT priority as kswapd, thus we will never get
  5945. * into a priority inversion deadlock.
  5946. *
  5947. * we definitely have to have equal or higher priority than
  5948. * bdflush, otherwise bdflush will deadlock if there are too
  5949. * many dirty RAID5 blocks.
  5950. */
  5951. allow_signal(SIGKILL);
  5952. while (!kthread_should_stop()) {
  5953. /* We need to wait INTERRUPTIBLE so that
  5954. * we don't add to the load-average.
  5955. * That means we need to be sure no signals are
  5956. * pending
  5957. */
  5958. if (signal_pending(current))
  5959. flush_signals(current);
  5960. wait_event_interruptible_timeout
  5961. (thread->wqueue,
  5962. test_bit(THREAD_WAKEUP, &thread->flags)
  5963. || kthread_should_stop(),
  5964. thread->timeout);
  5965. clear_bit(THREAD_WAKEUP, &thread->flags);
  5966. if (!kthread_should_stop())
  5967. thread->run(thread->mddev);
  5968. }
  5969. return 0;
  5970. }
  5971. void md_wakeup_thread(struct md_thread *thread)
  5972. {
  5973. if (thread) {
  5974. pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
  5975. set_bit(THREAD_WAKEUP, &thread->flags);
  5976. wake_up(&thread->wqueue);
  5977. }
  5978. }
  5979. struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev *mddev,
  5980. const char *name)
  5981. {
  5982. struct md_thread *thread;
  5983. thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
  5984. if (!thread)
  5985. return NULL;
  5986. init_waitqueue_head(&thread->wqueue);
  5987. thread->run = run;
  5988. thread->mddev = mddev;
  5989. thread->timeout = MAX_SCHEDULE_TIMEOUT;
  5990. thread->tsk = kthread_run(md_thread, thread,
  5991. "%s_%s",
  5992. mdname(thread->mddev),
  5993. name);
  5994. if (IS_ERR(thread->tsk)) {
  5995. kfree(thread);
  5996. return NULL;
  5997. }
  5998. return thread;
  5999. }
  6000. void md_unregister_thread(struct md_thread **threadp)
  6001. {
  6002. struct md_thread *thread = *threadp;
  6003. if (!thread)
  6004. return;
  6005. pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
  6006. /* Locking ensures that mddev_unlock does not wake_up a
  6007. * non-existent thread
  6008. */
  6009. spin_lock(&pers_lock);
  6010. *threadp = NULL;
  6011. spin_unlock(&pers_lock);
  6012. kthread_stop(thread->tsk);
  6013. kfree(thread);
  6014. }
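/*
 * Usage sketch (not from md.c): how a personality would typically pair
 * md_register_thread()/md_unregister_thread() around its service
 * routine. "myraid_*" are hypothetical functions; the signatures match
 * the definitions above.
 */
#if 0
static void myraid_daemon(struct mddev *mddev)
{
	/* runs whenever md_wakeup_thread() sets THREAD_WAKEUP */
}

static int myraid_run(struct mddev *mddev)
{
	mddev->thread = md_register_thread(myraid_daemon, mddev, "myraid");
	if (!mddev->thread)
		return -ENOMEM;
	return 0;
}

static int myraid_stop(struct mddev *mddev)
{
	md_unregister_thread(&mddev->thread);
	return 0;
}
#endif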
  6015. void md_error(struct mddev *mddev, struct md_rdev *rdev)
  6016. {
  6017. if (!mddev) {
  6018. MD_BUG();
  6019. return;
  6020. }
  6021. if (!rdev || test_bit(Faulty, &rdev->flags))
  6022. return;
  6023. if (!mddev->pers || !mddev->pers->error_handler)
  6024. return;
  6025. mddev->pers->error_handler(mddev,rdev);
  6026. if (mddev->degraded)
  6027. set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
  6028. sysfs_notify_dirent_safe(rdev->sysfs_state);
  6029. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  6030. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  6031. md_wakeup_thread(mddev->thread);
  6032. if (mddev->event_work.func)
  6033. queue_work(md_misc_wq, &mddev->event_work);
  6034. md_new_event_inintr(mddev);
  6035. }
6036. /* seq_file implementation for /proc/mdstat */
  6037. static void status_unused(struct seq_file *seq)
  6038. {
  6039. int i = 0;
  6040. struct md_rdev *rdev;
  6041. seq_printf(seq, "unused devices: ");
  6042. list_for_each_entry(rdev, &pending_raid_disks, same_set) {
  6043. char b[BDEVNAME_SIZE];
  6044. i++;
  6045. seq_printf(seq, "%s ",
  6046. bdevname(rdev->bdev,b));
  6047. }
  6048. if (!i)
  6049. seq_printf(seq, "<none>");
  6050. seq_printf(seq, "\n");
  6051. }
  6052. static void status_resync(struct seq_file *seq, struct mddev * mddev)
  6053. {
  6054. sector_t max_sectors, resync, res;
  6055. unsigned long dt, db;
  6056. sector_t rt;
  6057. int scale;
  6058. unsigned int per_milli;
  6059. resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
  6060. if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
  6061. test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
  6062. max_sectors = mddev->resync_max_sectors;
  6063. else
  6064. max_sectors = mddev->dev_sectors;
  6065. /*
  6066. * Should not happen.
  6067. */
  6068. if (!max_sectors) {
  6069. MD_BUG();
  6070. return;
  6071. }
  6072. /* Pick 'scale' such that (resync>>scale)*1000 will fit
  6073. * in a sector_t, and (max_sectors>>scale) will fit in a
  6074. * u32, as those are the requirements for sector_div.
  6075. * Thus 'scale' must be at least 10
  6076. */
  6077. scale = 10;
  6078. if (sizeof(sector_t) > sizeof(unsigned long)) {
  6079. while ( max_sectors/2 > (1ULL<<(scale+32)))
  6080. scale++;
  6081. }
  6082. res = (resync>>scale)*1000;
  6083. sector_div(res, (u32)((max_sectors>>scale)+1));
  6084. per_milli = res;
  6085. {
  6086. int i, x = per_milli/50, y = 20-x;
  6087. seq_printf(seq, "[");
  6088. for (i = 0; i < x; i++)
  6089. seq_printf(seq, "=");
  6090. seq_printf(seq, ">");
  6091. for (i = 0; i < y; i++)
  6092. seq_printf(seq, ".");
  6093. seq_printf(seq, "] ");
  6094. }
  6095. seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
  6096. (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
  6097. "reshape" :
  6098. (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
  6099. "check" :
  6100. (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
  6101. "resync" : "recovery"))),
  6102. per_milli/10, per_milli % 10,
  6103. (unsigned long long) resync/2,
  6104. (unsigned long long) max_sectors/2);
  6105. /*
  6106. * dt: time from mark until now
  6107. * db: blocks written from mark until now
  6108. * rt: remaining time
  6109. *
6110. * rt is a sector_t, so it could be 32-bit or 64-bit.
6111. * So we divide before multiplying, in case it is 32-bit and close
  6112. * to the limit.
  6113. * We scale the divisor (db) by 32 to avoid losing precision
  6114. * near the end of resync when the number of remaining sectors
  6115. * is close to 'db'.
  6116. * We then divide rt by 32 after multiplying by db to compensate.
  6117. * The '+1' avoids division by zero if db is very small.
  6118. */
  6119. dt = ((jiffies - mddev->resync_mark) / HZ);
  6120. if (!dt) dt++;
  6121. db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
  6122. - mddev->resync_mark_cnt;
  6123. rt = max_sectors - resync; /* number of remaining sectors */
  6124. sector_div(rt, db/32+1);
  6125. rt *= dt;
  6126. rt >>= 5;
  6127. seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
  6128. ((unsigned long)rt % 60)/6);
  6129. seq_printf(seq, " speed=%ldK/sec", db/2/dt);
  6130. }
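/*
 * Worked example of the estimate above (numbers invented): with
 * 1,000,000 sectors remaining and db = 409600 sectors written over
 * dt = 100 seconds, rt = (1000000 / (409600/32 + 1)) * 100 >> 5 = 243,
 * i.e. about 4 minutes to go, and the printed speed is
 * db/2/dt = 2048 K/sec.
 */
#if 0 /* standalone user-space replica of the arithmetic */
#include <stdio.h>

int main(void)
{
	unsigned long long rt = 1000000;	/* remaining sectors */
	unsigned long db = 409600, dt = 100;	/* sectors and seconds */

	rt /= db / 32 + 1;	/* divide first to avoid overflow */
	rt *= dt;
	rt >>= 5;		/* compensate for the *32 scaling of db */
	printf("finish=%llu.%llumin speed=%luK/sec\n",
	       rt / 60, (rt % 60) / 6, db / 2 / dt);
	return 0;
}
#endif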
  6131. static void *md_seq_start(struct seq_file *seq, loff_t *pos)
  6132. {
  6133. struct list_head *tmp;
  6134. loff_t l = *pos;
  6135. struct mddev *mddev;
  6136. if (l >= 0x10000)
  6137. return NULL;
  6138. if (!l--)
  6139. /* header */
  6140. return (void*)1;
  6141. spin_lock(&all_mddevs_lock);
  6142. list_for_each(tmp,&all_mddevs)
  6143. if (!l--) {
  6144. mddev = list_entry(tmp, struct mddev, all_mddevs);
  6145. mddev_get(mddev);
  6146. spin_unlock(&all_mddevs_lock);
  6147. return mddev;
  6148. }
  6149. spin_unlock(&all_mddevs_lock);
  6150. if (!l--)
  6151. return (void*)2;/* tail */
  6152. return NULL;
  6153. }
  6154. static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  6155. {
  6156. struct list_head *tmp;
  6157. struct mddev *next_mddev, *mddev = v;
  6158. ++*pos;
  6159. if (v == (void*)2)
  6160. return NULL;
  6161. spin_lock(&all_mddevs_lock);
  6162. if (v == (void*)1)
  6163. tmp = all_mddevs.next;
  6164. else
  6165. tmp = mddev->all_mddevs.next;
  6166. if (tmp != &all_mddevs)
  6167. next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
  6168. else {
  6169. next_mddev = (void*)2;
  6170. *pos = 0x10000;
  6171. }
  6172. spin_unlock(&all_mddevs_lock);
  6173. if (v != (void*)1)
  6174. mddev_put(mddev);
  6175. return next_mddev;
  6176. }
  6177. static void md_seq_stop(struct seq_file *seq, void *v)
  6178. {
  6179. struct mddev *mddev = v;
  6180. if (mddev && v != (void*)1 && v != (void*)2)
  6181. mddev_put(mddev);
  6182. }
  6183. static int md_seq_show(struct seq_file *seq, void *v)
  6184. {
  6185. struct mddev *mddev = v;
  6186. sector_t sectors;
  6187. struct md_rdev *rdev;
  6188. if (v == (void*)1) {
  6189. struct md_personality *pers;
  6190. seq_printf(seq, "Personalities : ");
  6191. spin_lock(&pers_lock);
  6192. list_for_each_entry(pers, &pers_list, list)
  6193. seq_printf(seq, "[%s] ", pers->name);
  6194. spin_unlock(&pers_lock);
  6195. seq_printf(seq, "\n");
  6196. seq->poll_event = atomic_read(&md_event_count);
  6197. return 0;
  6198. }
  6199. if (v == (void*)2) {
  6200. status_unused(seq);
  6201. return 0;
  6202. }
  6203. if (mddev_lock(mddev) < 0)
  6204. return -EINTR;
  6205. if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
  6206. seq_printf(seq, "%s : %sactive", mdname(mddev),
  6207. mddev->pers ? "" : "in");
  6208. if (mddev->pers) {
  6209. if (mddev->ro==1)
  6210. seq_printf(seq, " (read-only)");
  6211. if (mddev->ro==2)
  6212. seq_printf(seq, " (auto-read-only)");
  6213. seq_printf(seq, " %s", mddev->pers->name);
  6214. }
  6215. sectors = 0;
  6216. rdev_for_each(rdev, mddev) {
  6217. char b[BDEVNAME_SIZE];
  6218. seq_printf(seq, " %s[%d]",
  6219. bdevname(rdev->bdev,b), rdev->desc_nr);
  6220. if (test_bit(WriteMostly, &rdev->flags))
  6221. seq_printf(seq, "(W)");
  6222. if (test_bit(Faulty, &rdev->flags)) {
  6223. seq_printf(seq, "(F)");
  6224. continue;
  6225. }
  6226. if (rdev->raid_disk < 0)
  6227. seq_printf(seq, "(S)"); /* spare */
  6228. if (test_bit(Replacement, &rdev->flags))
  6229. seq_printf(seq, "(R)");
  6230. sectors += rdev->sectors;
  6231. }
  6232. if (!list_empty(&mddev->disks)) {
  6233. if (mddev->pers)
  6234. seq_printf(seq, "\n %llu blocks",
  6235. (unsigned long long)
  6236. mddev->array_sectors / 2);
  6237. else
  6238. seq_printf(seq, "\n %llu blocks",
  6239. (unsigned long long)sectors / 2);
  6240. }
  6241. if (mddev->persistent) {
  6242. if (mddev->major_version != 0 ||
  6243. mddev->minor_version != 90) {
  6244. seq_printf(seq," super %d.%d",
  6245. mddev->major_version,
  6246. mddev->minor_version);
  6247. }
  6248. } else if (mddev->external)
  6249. seq_printf(seq, " super external:%s",
  6250. mddev->metadata_type);
  6251. else
  6252. seq_printf(seq, " super non-persistent");
  6253. if (mddev->pers) {
  6254. mddev->pers->status(seq, mddev);
  6255. seq_printf(seq, "\n ");
  6256. if (mddev->pers->sync_request) {
  6257. if (mddev->curr_resync > 2) {
  6258. status_resync(seq, mddev);
  6259. seq_printf(seq, "\n ");
  6260. } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
  6261. seq_printf(seq, "\tresync=DELAYED\n ");
  6262. else if (mddev->recovery_cp < MaxSector)
  6263. seq_printf(seq, "\tresync=PENDING\n ");
  6264. }
  6265. } else
  6266. seq_printf(seq, "\n ");
  6267. bitmap_status(seq, mddev->bitmap);
  6268. seq_printf(seq, "\n");
  6269. }
  6270. mddev_unlock(mddev);
  6271. return 0;
  6272. }
  6273. static const struct seq_operations md_seq_ops = {
  6274. .start = md_seq_start,
  6275. .next = md_seq_next,
  6276. .stop = md_seq_stop,
  6277. .show = md_seq_show,
  6278. };
  6279. static int md_seq_open(struct inode *inode, struct file *file)
  6280. {
  6281. struct seq_file *seq;
  6282. int error;
  6283. error = seq_open(file, &md_seq_ops);
  6284. if (error)
  6285. return error;
  6286. seq = file->private_data;
  6287. seq->poll_event = atomic_read(&md_event_count);
  6288. return error;
  6289. }
  6290. static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
  6291. {
  6292. struct seq_file *seq = filp->private_data;
  6293. int mask;
  6294. poll_wait(filp, &md_event_waiters, wait);
  6295. /* always allow read */
  6296. mask = POLLIN | POLLRDNORM;
  6297. if (seq->poll_event != atomic_read(&md_event_count))
  6298. mask |= POLLERR | POLLPRI;
  6299. return mask;
  6300. }
  6301. static const struct file_operations md_seq_fops = {
  6302. .owner = THIS_MODULE,
  6303. .open = md_seq_open,
  6304. .read = seq_read,
  6305. .llseek = seq_lseek,
  6306. .release = seq_release_private,
  6307. .poll = mdstat_poll,
  6308. };
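/*
 * User-space sketch (not part of md.c): mdstat_poll() above flags
 * POLLERR|POLLPRI whenever md_event_count has moved on, so a monitor
 * can sleep in poll(2) and re-read /proc/mdstat only on array events.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct pollfd pfd;

	pfd.fd = open("/proc/mdstat", O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLPRI;
	for (;;) {
		/* a full re-read re-arms seq->poll_event */
		lseek(pfd.fd, 0, SEEK_SET);
		if (read(pfd.fd, buf, sizeof(buf) - 1) < 0)
			return 1;
		if (poll(&pfd, 1, -1) < 0)
			return 1;
		printf("mdstat changed\n");
	}
	return 0;
}
#endif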
  6309. int register_md_personality(struct md_personality *p)
  6310. {
  6311. spin_lock(&pers_lock);
  6312. list_add_tail(&p->list, &pers_list);
  6313. printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
  6314. spin_unlock(&pers_lock);
  6315. return 0;
  6316. }
  6317. int unregister_md_personality(struct md_personality *p)
  6318. {
  6319. printk(KERN_INFO "md: %s personality unregistered\n", p->name);
  6320. spin_lock(&pers_lock);
  6321. list_del_init(&p->list);
  6322. spin_unlock(&pers_lock);
  6323. return 0;
  6324. }
  6325. static int is_mddev_idle(struct mddev *mddev, int init)
  6326. {
  6327. struct md_rdev * rdev;
  6328. int idle;
  6329. int curr_events;
  6330. idle = 1;
  6331. rcu_read_lock();
  6332. rdev_for_each_rcu(rdev, mddev) {
  6333. struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
  6334. curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
  6335. (int)part_stat_read(&disk->part0, sectors[1]) -
  6336. atomic_read(&disk->sync_io);
  6337. /* sync IO will cause sync_io to increase before the disk_stats
  6338. * as sync_io is counted when a request starts, and
  6339. * disk_stats is counted when it completes.
  6340. * So resync activity will cause curr_events to be smaller than
  6341. * when there was no such activity.
  6342. * non-sync IO will cause disk_stat to increase without
  6343. * increasing sync_io so curr_events will (eventually)
  6344. * be larger than it was before. Once it becomes
  6345. * substantially larger, the test below will cause
  6346. * the array to appear non-idle, and resync will slow
  6347. * down.
  6348. * If there is a lot of outstanding resync activity when
  6349. * we set last_event to curr_events, then all that activity
  6350. * completing might cause the array to appear non-idle
  6351. * and resync will be slowed down even though there might
  6352. * not have been non-resync activity. This will only
  6353. * happen once though. 'last_events' will soon reflect
  6354. * the state where there is little or no outstanding
  6355. * resync requests, and further resync activity will
  6356. * always make curr_events less than last_events.
  6357. *
  6358. */
  6359. if (init || curr_events - rdev->last_events > 64) {
  6360. rdev->last_events = curr_events;
  6361. idle = 0;
  6362. }
  6363. }
  6364. rcu_read_unlock();
  6365. return idle;
  6366. }
  6367. void md_done_sync(struct mddev *mddev, int blocks, int ok)
  6368. {
6369. /* another "blocks" (512-byte) blocks have been synced */
  6370. atomic_sub(blocks, &mddev->recovery_active);
  6371. wake_up(&mddev->recovery_wait);
  6372. if (!ok) {
  6373. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  6374. md_wakeup_thread(mddev->thread);
  6375. // stop recovery, signal do_sync ....
  6376. }
  6377. }
  6378. /* md_write_start(mddev, bi)
  6379. * If we need to update some array metadata (e.g. 'active' flag
  6380. * in superblock) before writing, schedule a superblock update
  6381. * and wait for it to complete.
  6382. */
  6383. void md_write_start(struct mddev *mddev, struct bio *bi)
  6384. {
  6385. int did_change = 0;
  6386. if (bio_data_dir(bi) != WRITE)
  6387. return;
  6388. BUG_ON(mddev->ro == 1);
  6389. if (mddev->ro == 2) {
  6390. /* need to switch to read/write */
  6391. mddev->ro = 0;
  6392. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  6393. md_wakeup_thread(mddev->thread);
  6394. md_wakeup_thread(mddev->sync_thread);
  6395. did_change = 1;
  6396. }
  6397. atomic_inc(&mddev->writes_pending);
  6398. if (mddev->safemode == 1)
  6399. mddev->safemode = 0;
  6400. if (mddev->in_sync) {
  6401. spin_lock_irq(&mddev->write_lock);
  6402. if (mddev->in_sync) {
  6403. mddev->in_sync = 0;
  6404. set_bit(MD_CHANGE_CLEAN, &mddev->flags);
  6405. set_bit(MD_CHANGE_PENDING, &mddev->flags);
  6406. md_wakeup_thread(mddev->thread);
  6407. did_change = 1;
  6408. }
  6409. spin_unlock_irq(&mddev->write_lock);
  6410. }
  6411. if (did_change)
  6412. sysfs_notify_dirent_safe(mddev->sysfs_state);
  6413. wait_event(mddev->sb_wait,
  6414. !test_bit(MD_CHANGE_PENDING, &mddev->flags));
  6415. }
  6416. void md_write_end(struct mddev *mddev)
  6417. {
  6418. if (atomic_dec_and_test(&mddev->writes_pending)) {
  6419. if (mddev->safemode == 2)
  6420. md_wakeup_thread(mddev->thread);
  6421. else if (mddev->safemode_delay)
  6422. mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
  6423. }
  6424. }
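/*
 * Usage sketch (hypothetical personality code, not from md.c): every
 * write a personality handles is bracketed by md_write_start() before
 * the I/O is issued and md_write_end() when it completes, so the
 * 'active' superblock state and the safemode timer stay accurate.
 */
#if 0
static void myraid_make_request(struct mddev *mddev, struct bio *bio)
{
	md_write_start(mddev, bio);	/* may block on a sb update */
	/* ... map and submit the bio; completion calls the handler below */
}

static void myraid_end_write(struct mddev *mddev)
{
	md_write_end(mddev);		/* rearms the safemode timer */
}
#endif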
  6425. /* md_allow_write(mddev)
  6426. * Calling this ensures that the array is marked 'active' so that writes
  6427. * may proceed without blocking. It is important to call this before
  6428. * attempting a GFP_KERNEL allocation while holding the mddev lock.
  6429. * Must be called with mddev_lock held.
  6430. *
6431. * In the ->external case MD_CHANGE_CLEAN cannot be cleared until the mddev lock
  6432. * is dropped, so return -EAGAIN after notifying userspace.
  6433. */
  6434. int md_allow_write(struct mddev *mddev)
  6435. {
  6436. if (!mddev->pers)
  6437. return 0;
  6438. if (mddev->ro)
  6439. return 0;
  6440. if (!mddev->pers->sync_request)
  6441. return 0;
  6442. spin_lock_irq(&mddev->write_lock);
  6443. if (mddev->in_sync) {
  6444. mddev->in_sync = 0;
  6445. set_bit(MD_CHANGE_CLEAN, &mddev->flags);
  6446. set_bit(MD_CHANGE_PENDING, &mddev->flags);
  6447. if (mddev->safemode_delay &&
  6448. mddev->safemode == 0)
  6449. mddev->safemode = 1;
  6450. spin_unlock_irq(&mddev->write_lock);
  6451. md_update_sb(mddev, 0);
  6452. sysfs_notify_dirent_safe(mddev->sysfs_state);
  6453. } else
  6454. spin_unlock_irq(&mddev->write_lock);
  6455. if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
  6456. return -EAGAIN;
  6457. else
  6458. return 0;
  6459. }
  6460. EXPORT_SYMBOL_GPL(md_allow_write);
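/*
 * Usage sketch (hypothetical caller, not from md.c): take the array out
 * of 'clean' before a GFP_KERNEL allocation made while holding the
 * mddev lock; in the ->external case the function returns -EAGAIN, and
 * the assumption here is that the caller propagates that so the lock
 * can be dropped and the operation retried.
 */
#if 0
static int myraid_grow_buffers(struct mddev *mddev)
{
	int err = md_allow_write(mddev);

	if (err == -EAGAIN)
		return err;	/* caller drops the mddev lock and retries */
	/* now safe to kmalloc(..., GFP_KERNEL) without risking deadlock */
	return 0;
}
#endif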
  6461. #define SYNC_MARKS 10
  6462. #define SYNC_MARK_STEP (3*HZ)
  6463. void md_do_sync(struct mddev *mddev)
  6464. {
  6465. struct mddev *mddev2;
  6466. unsigned int currspeed = 0,
  6467. window;
  6468. sector_t max_sectors,j, io_sectors;
  6469. unsigned long mark[SYNC_MARKS];
  6470. sector_t mark_cnt[SYNC_MARKS];
  6471. int last_mark,m;
  6472. struct list_head *tmp;
  6473. sector_t last_check;
  6474. int skipped = 0;
  6475. struct md_rdev *rdev;
  6476. char *desc;
  6477. struct blk_plug plug;
6478. /* just in case the thread restarts... */
  6479. if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
  6480. return;
  6481. if (mddev->ro) /* never try to sync a read-only array */
  6482. return;
  6483. if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
  6484. if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
  6485. desc = "data-check";
  6486. else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
  6487. desc = "requested-resync";
  6488. else
  6489. desc = "resync";
  6490. } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
  6491. desc = "reshape";
  6492. else
  6493. desc = "recovery";
  6494. /* we overload curr_resync somewhat here.
  6495. * 0 == not engaged in resync at all
  6496. * 2 == checking that there is no conflict with another sync
  6497. * 1 == like 2, but have yielded to allow conflicting resync to
6498. * commence
  6499. * other == active in resync - this many blocks
  6500. *
  6501. * Before starting a resync we must have set curr_resync to
  6502. * 2, and then checked that every "conflicting" array has curr_resync
  6503. * less than ours. When we find one that is the same or higher
  6504. * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
6505. * to 1 if we choose to yield (arbitrarily, based on the address of the mddev structure).
  6506. * This will mean we have to start checking from the beginning again.
  6507. *
  6508. */
  6509. do {
  6510. mddev->curr_resync = 2;
  6511. try_again:
  6512. if (kthread_should_stop())
  6513. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  6514. if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
  6515. goto skip;
  6516. for_each_mddev(mddev2, tmp) {
  6517. if (mddev2 == mddev)
  6518. continue;
  6519. if (!mddev->parallel_resync
  6520. && mddev2->curr_resync
  6521. && match_mddev_units(mddev, mddev2)) {
  6522. DEFINE_WAIT(wq);
  6523. if (mddev < mddev2 && mddev->curr_resync == 2) {
  6524. /* arbitrarily yield */
  6525. mddev->curr_resync = 1;
  6526. wake_up(&resync_wait);
  6527. }
  6528. if (mddev > mddev2 && mddev->curr_resync == 1)
  6529. /* no need to wait here, we can wait the next
  6530. * time 'round when curr_resync == 2
  6531. */
  6532. continue;
  6533. /* We need to wait 'interruptible' so as not to
  6534. * contribute to the load average, and not to
  6535. * be caught by 'softlockup'
  6536. */
  6537. prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
  6538. if (!kthread_should_stop() &&
  6539. mddev2->curr_resync >= mddev->curr_resync) {
  6540. printk(KERN_INFO "md: delaying %s of %s"
  6541. " until %s has finished (they"
  6542. " share one or more physical units)\n",
  6543. desc, mdname(mddev), mdname(mddev2));
  6544. mddev_put(mddev2);
  6545. if (signal_pending(current))
  6546. flush_signals(current);
  6547. schedule();
  6548. finish_wait(&resync_wait, &wq);
  6549. goto try_again;
  6550. }
  6551. finish_wait(&resync_wait, &wq);
  6552. }
  6553. }
  6554. } while (mddev->curr_resync < 2);
  6555. j = 0;
  6556. if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
  6557. /* resync follows the size requested by the personality,
  6558. * which defaults to physical size, but can be virtual size
  6559. */
  6560. max_sectors = mddev->resync_max_sectors;
  6561. mddev->resync_mismatches = 0;
  6562. /* we don't use the checkpoint if there's a bitmap */
  6563. if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
  6564. j = mddev->resync_min;
  6565. else if (!mddev->bitmap)
  6566. j = mddev->recovery_cp;
  6567. } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
  6568. max_sectors = mddev->resync_max_sectors;
  6569. else {
  6570. /* recovery follows the physical size of devices */
  6571. max_sectors = mddev->dev_sectors;
  6572. j = MaxSector;
  6573. rcu_read_lock();
  6574. rdev_for_each_rcu(rdev, mddev)
  6575. if (rdev->raid_disk >= 0 &&
  6576. !test_bit(Faulty, &rdev->flags) &&
  6577. !test_bit(In_sync, &rdev->flags) &&
  6578. rdev->recovery_offset < j)
  6579. j = rdev->recovery_offset;
  6580. rcu_read_unlock();
  6581. }
  6582. printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
  6583. printk(KERN_INFO "md: minimum _guaranteed_ speed:"
  6584. " %d KB/sec/disk.\n", speed_min(mddev));
  6585. printk(KERN_INFO "md: using maximum available idle IO bandwidth "
  6586. "(but not more than %d KB/sec) for %s.\n",
  6587. speed_max(mddev), desc);
  6588. is_mddev_idle(mddev, 1); /* this initializes IO event counters */
  6589. io_sectors = 0;
  6590. for (m = 0; m < SYNC_MARKS; m++) {
  6591. mark[m] = jiffies;
  6592. mark_cnt[m] = io_sectors;
  6593. }
  6594. last_mark = 0;
  6595. mddev->resync_mark = mark[last_mark];
  6596. mddev->resync_mark_cnt = mark_cnt[last_mark];
  6597. /*
  6598. * Tune reconstruction:
  6599. */
  6600. window = 32*(PAGE_SIZE/512);
  6601. printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
  6602. window/2, (unsigned long long)max_sectors/2);
  6603. atomic_set(&mddev->recovery_active, 0);
  6604. last_check = 0;
  6605. if (j>2) {
  6606. printk(KERN_INFO
  6607. "md: resuming %s of %s from checkpoint.\n",
  6608. desc, mdname(mddev));
  6609. mddev->curr_resync = j;
  6610. }
  6611. mddev->curr_resync_completed = j;
  6612. blk_start_plug(&plug);
  6613. while (j < max_sectors) {
  6614. sector_t sectors;
  6615. skipped = 0;
  6616. if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
  6617. ((mddev->curr_resync > mddev->curr_resync_completed &&
  6618. (mddev->curr_resync - mddev->curr_resync_completed)
  6619. > (max_sectors >> 4)) ||
  6620. (j - mddev->curr_resync_completed)*2
  6621. >= mddev->resync_max - mddev->curr_resync_completed
  6622. )) {
  6623. /* time to update curr_resync_completed */
  6624. wait_event(mddev->recovery_wait,
  6625. atomic_read(&mddev->recovery_active) == 0);
  6626. mddev->curr_resync_completed = j;
  6627. set_bit(MD_CHANGE_CLEAN, &mddev->flags);
  6628. sysfs_notify(&mddev->kobj, NULL, "sync_completed");
  6629. }
  6630. while (j >= mddev->resync_max && !kthread_should_stop()) {
  6631. /* As this condition is controlled by user-space,
  6632. * we can block indefinitely, so use '_interruptible'
  6633. * to avoid triggering warnings.
  6634. */
  6635. flush_signals(current); /* just in case */
  6636. wait_event_interruptible(mddev->recovery_wait,
  6637. mddev->resync_max > j
  6638. || kthread_should_stop());
  6639. }
  6640. if (kthread_should_stop())
  6641. goto interrupted;
  6642. sectors = mddev->pers->sync_request(mddev, j, &skipped,
  6643. currspeed < speed_min(mddev));
  6644. if (sectors == 0) {
  6645. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  6646. goto out;
  6647. }
  6648. if (!skipped) { /* actual IO requested */
  6649. io_sectors += sectors;
  6650. atomic_add(sectors, &mddev->recovery_active);
  6651. }
  6652. if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
  6653. break;
  6654. j += sectors;
  6655. if (j>1) mddev->curr_resync = j;
  6656. mddev->curr_mark_cnt = io_sectors;
  6657. if (last_check == 0)
  6658. /* this is the earliest that rebuild will be
  6659. * visible in /proc/mdstat
  6660. */
  6661. md_new_event(mddev);
  6662. if (last_check + window > io_sectors || j == max_sectors)
  6663. continue;
  6664. last_check = io_sectors;
  6665. repeat:
  6666. if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
  6667. /* step marks */
  6668. int next = (last_mark+1) % SYNC_MARKS;
  6669. mddev->resync_mark = mark[next];
  6670. mddev->resync_mark_cnt = mark_cnt[next];
  6671. mark[next] = jiffies;
  6672. mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
  6673. last_mark = next;
  6674. }
  6675. if (kthread_should_stop())
  6676. goto interrupted;
  6677. /*
6678. * this loop exits only when we are either slower than
6679. * the 'hard' speed limit, or the system was IO-idle for
6680. * a jiffy.
  6681. * the system might be non-idle CPU-wise, but we only care
  6682. * about not overloading the IO subsystem. (things like an
  6683. * e2fsck being done on the RAID array should execute fast)
  6684. */
  6685. cond_resched();
  6686. currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
  6687. /((jiffies-mddev->resync_mark)/HZ +1) +1;
  6688. if (currspeed > speed_min(mddev)) {
  6689. if ((currspeed > speed_max(mddev)) ||
  6690. !is_mddev_idle(mddev, 0)) {
  6691. msleep(500);
  6692. goto repeat;
  6693. }
  6694. }
  6695. }
  6696. printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
  6697. /*
  6698. * this also signals 'finished resyncing' to md_stop
  6699. */
  6700. out:
  6701. blk_finish_plug(&plug);
  6702. wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
  6703. /* tell personality that we are finished */
  6704. mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
  6705. if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
  6706. mddev->curr_resync > 2) {
  6707. if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
  6708. if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
  6709. if (mddev->curr_resync >= mddev->recovery_cp) {
  6710. printk(KERN_INFO
  6711. "md: checkpointing %s of %s.\n",
  6712. desc, mdname(mddev));
  6713. mddev->recovery_cp =
  6714. mddev->curr_resync_completed;
  6715. }
  6716. } else
  6717. mddev->recovery_cp = MaxSector;
  6718. } else {
  6719. if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
  6720. mddev->curr_resync = MaxSector;
  6721. rcu_read_lock();
  6722. rdev_for_each_rcu(rdev, mddev)
  6723. if (rdev->raid_disk >= 0 &&
  6724. mddev->delta_disks >= 0 &&
  6725. !test_bit(Faulty, &rdev->flags) &&
  6726. !test_bit(In_sync, &rdev->flags) &&
  6727. rdev->recovery_offset < mddev->curr_resync)
  6728. rdev->recovery_offset = mddev->curr_resync;
  6729. rcu_read_unlock();
  6730. }
  6731. }
  6732. skip:
  6733. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  6734. if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
6735. /* We completed, so the min/max settings can be forgotten if they were used. */
  6736. if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
  6737. mddev->resync_min = 0;
  6738. mddev->resync_max = MaxSector;
  6739. } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
  6740. mddev->resync_min = mddev->curr_resync_completed;
  6741. mddev->curr_resync = 0;
  6742. wake_up(&resync_wait);
  6743. set_bit(MD_RECOVERY_DONE, &mddev->recovery);
  6744. md_wakeup_thread(mddev->thread);
  6745. return;
  6746. interrupted:
  6747. /*
  6748. * got a signal, exit.
  6749. */
  6750. printk(KERN_INFO
  6751. "md: md_do_sync() got signal ... exiting\n");
  6752. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  6753. goto out;
  6754. }
  6755. EXPORT_SYMBOL_GPL(md_do_sync);
  6756. static int remove_and_add_spares(struct mddev *mddev)
  6757. {
  6758. struct md_rdev *rdev;
  6759. int spares = 0;
  6760. int removed = 0;
  6761. mddev->curr_resync_completed = 0;
  6762. rdev_for_each(rdev, mddev)
  6763. if (rdev->raid_disk >= 0 &&
  6764. !test_bit(Blocked, &rdev->flags) &&
  6765. (test_bit(Faulty, &rdev->flags) ||
  6766. ! test_bit(In_sync, &rdev->flags)) &&
  6767. atomic_read(&rdev->nr_pending)==0) {
  6768. if (mddev->pers->hot_remove_disk(
  6769. mddev, rdev) == 0) {
  6770. sysfs_unlink_rdev(mddev, rdev);
  6771. rdev->raid_disk = -1;
  6772. removed++;
  6773. }
  6774. }
  6775. if (removed)
  6776. sysfs_notify(&mddev->kobj, NULL,
  6777. "degraded");
  6778. rdev_for_each(rdev, mddev) {
  6779. if (rdev->raid_disk >= 0 &&
  6780. !test_bit(In_sync, &rdev->flags) &&
  6781. !test_bit(Faulty, &rdev->flags))
  6782. spares++;
  6783. if (rdev->raid_disk < 0
  6784. && !test_bit(Faulty, &rdev->flags)) {
  6785. rdev->recovery_offset = 0;
  6786. if (mddev->pers->
  6787. hot_add_disk(mddev, rdev) == 0) {
  6788. if (sysfs_link_rdev(mddev, rdev))
  6789. /* failure here is OK */;
  6790. spares++;
  6791. md_new_event(mddev);
  6792. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  6793. }
  6794. }
  6795. }
  6796. return spares;
  6797. }
  6798. static void reap_sync_thread(struct mddev *mddev)
  6799. {
  6800. struct md_rdev *rdev;
  6801. /* resync has finished, collect result */
  6802. md_unregister_thread(&mddev->sync_thread);
  6803. if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
  6804. !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
  6805. /* success...*/
  6806. /* activate any spares */
  6807. if (mddev->pers->spare_active(mddev))
  6808. sysfs_notify(&mddev->kobj, NULL,
  6809. "degraded");
  6810. }
  6811. if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
  6812. mddev->pers->finish_reshape)
  6813. mddev->pers->finish_reshape(mddev);
6814. /* If the array is no longer degraded, then any saved_raid_disk
6815. * information must be scrapped. Also, if any device is now
6816. * In_sync, we must clear the saved_raid_disk for that device
6817. * so that the superblock for an incrementally recovered device
6818. * gets written out.
6819. */
  6820. rdev_for_each(rdev, mddev)
  6821. if (!mddev->degraded ||
  6822. test_bit(In_sync, &rdev->flags))
  6823. rdev->saved_raid_disk = -1;
  6824. md_update_sb(mddev, 1);
  6825. clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  6826. clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  6827. clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
  6828. clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
  6829. clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
  6830. /* flag recovery needed just to double check */
  6831. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  6832. sysfs_notify_dirent_safe(mddev->sysfs_action);
  6833. md_new_event(mddev);
  6834. if (mddev->event_work.func)
  6835. queue_work(md_misc_wq, &mddev->event_work);
  6836. }
  6837. /*
  6838. * This routine is regularly called by all per-raid-array threads to
  6839. * deal with generic issues like resync and super-block update.
  6840. * Raid personalities that don't have a thread (linear/raid0) do not
  6841. * need this as they never do any recovery or update the superblock.
  6842. *
  6843. * It does not do any resync itself, but rather "forks" off other threads
  6844. * to do that as needed.
  6845. * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
  6846. * "->recovery" and create a thread at ->sync_thread.
  6847. * When the thread finishes it sets MD_RECOVERY_DONE
6848. * and wakes up this thread, which will reap that thread and finish up.
  6849. * This thread also removes any faulty devices (with nr_pending == 0).
  6850. *
  6851. * The overall approach is:
  6852. * 1/ if the superblock needs updating, update it.
  6853. * 2/ If a recovery thread is running, don't do anything else.
  6854. * 3/ If recovery has finished, clean up, possibly marking spares active.
  6855. * 4/ If there are any faulty devices, remove them.
6856. * 5/ If the array is degraded, try to add spare devices
  6857. * 6/ If array has spares or is not in-sync, start a resync thread.
  6858. */
  6859. void md_check_recovery(struct mddev *mddev)
  6860. {
  6861. if (mddev->suspended)
  6862. return;
  6863. if (mddev->bitmap)
  6864. bitmap_daemon_work(mddev);
  6865. if (signal_pending(current)) {
  6866. if (mddev->pers->sync_request && !mddev->external) {
  6867. printk(KERN_INFO "md: %s in immediate safe mode\n",
  6868. mdname(mddev));
  6869. mddev->safemode = 2;
  6870. }
  6871. flush_signals(current);
  6872. }
  6873. if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
  6874. return;
  6875. if ( ! (
  6876. (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
  6877. test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
  6878. test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
  6879. (mddev->external == 0 && mddev->safemode == 1) ||
  6880. (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
  6881. && !mddev->in_sync && mddev->recovery_cp == MaxSector)
  6882. ))
  6883. return;
  6884. if (mddev_trylock(mddev)) {
  6885. int spares = 0;
  6886. if (mddev->ro) {
  6887. /* Only thing we do on a ro array is remove
  6888. * failed devices.
  6889. */
  6890. struct md_rdev *rdev;
  6891. rdev_for_each(rdev, mddev)
  6892. if (rdev->raid_disk >= 0 &&
  6893. !test_bit(Blocked, &rdev->flags) &&
  6894. test_bit(Faulty, &rdev->flags) &&
  6895. atomic_read(&rdev->nr_pending)==0) {
  6896. if (mddev->pers->hot_remove_disk(
  6897. mddev, rdev) == 0) {
  6898. sysfs_unlink_rdev(mddev, rdev);
  6899. rdev->raid_disk = -1;
  6900. }
  6901. }
  6902. clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  6903. goto unlock;
  6904. }
  6905. if (!mddev->external) {
  6906. int did_change = 0;
  6907. spin_lock_irq(&mddev->write_lock);
  6908. if (mddev->safemode &&
  6909. !atomic_read(&mddev->writes_pending) &&
  6910. !mddev->in_sync &&
  6911. mddev->recovery_cp == MaxSector) {
  6912. mddev->in_sync = 1;
  6913. did_change = 1;
  6914. set_bit(MD_CHANGE_CLEAN, &mddev->flags);
  6915. }
  6916. if (mddev->safemode == 1)
  6917. mddev->safemode = 0;
  6918. spin_unlock_irq(&mddev->write_lock);
  6919. if (did_change)
  6920. sysfs_notify_dirent_safe(mddev->sysfs_state);
  6921. }
  6922. if (mddev->flags)
  6923. md_update_sb(mddev, 0);
  6924. if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
  6925. !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
  6926. /* resync/recovery still happening */
  6927. clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  6928. goto unlock;
  6929. }
  6930. if (mddev->sync_thread) {
  6931. reap_sync_thread(mddev);
  6932. goto unlock;
  6933. }
  6934. /* Set RUNNING before clearing NEEDED to avoid
  6935. * any transients in the value of "sync_action".
  6936. */
  6937. set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  6938. /* Clear some bits that don't mean anything, but
  6939. * might be left set
  6940. */
  6941. clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
  6942. clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
  6943. if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
  6944. test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
  6945. goto unlock;
  6946. /* no recovery is running.
  6947. * remove any failed drives, then
  6948. * add spares if possible.
6949. * Spares are also removed and re-added, to allow
  6950. * the personality to fail the re-add.
  6951. */
  6952. if (mddev->reshape_position != MaxSector) {
  6953. if (mddev->pers->check_reshape == NULL ||
  6954. mddev->pers->check_reshape(mddev) != 0)
  6955. /* Cannot proceed */
  6956. goto unlock;
  6957. set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
  6958. clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
  6959. } else if ((spares = remove_and_add_spares(mddev))) {
  6960. clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  6961. clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
  6962. clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
  6963. set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
  6964. } else if (mddev->recovery_cp < MaxSector) {
  6965. set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  6966. clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
  6967. } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
  6968. /* nothing to be done ... */
  6969. goto unlock;
  6970. if (mddev->pers->sync_request) {
  6971. if (spares) {
  6972. /* We are adding a device or devices to an array
  6973. * which has the bitmap stored on all devices.
  6974. * So make sure all bitmap pages get written
  6975. */
  6976. bitmap_write_all(mddev->bitmap);
  6977. }
  6978. mddev->sync_thread = md_register_thread(md_do_sync,
  6979. mddev,
  6980. "resync");
  6981. if (!mddev->sync_thread) {
  6982. printk(KERN_ERR "%s: could not start resync"
  6983. " thread...\n",
  6984. mdname(mddev));
  6985. /* leave the spares where they are, it shouldn't hurt */
  6986. clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  6987. clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  6988. clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
  6989. clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
  6990. clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
  6991. } else
  6992. md_wakeup_thread(mddev->sync_thread);
  6993. sysfs_notify_dirent_safe(mddev->sysfs_action);
  6994. md_new_event(mddev);
  6995. }
  6996. unlock:
  6997. if (!mddev->sync_thread) {
  6998. clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  6999. if (test_and_clear_bit(MD_RECOVERY_RECOVER,
  7000. &mddev->recovery))
  7001. if (mddev->sysfs_action)
  7002. sysfs_notify_dirent_safe(mddev->sysfs_action);
  7003. }
  7004. mddev_unlock(mddev);
  7005. }
  7006. }
  7007. void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
  7008. {
  7009. sysfs_notify_dirent_safe(rdev->sysfs_state);
  7010. wait_event_timeout(rdev->blocked_wait,
  7011. !test_bit(Blocked, &rdev->flags) &&
  7012. !test_bit(BlockedBadBlocks, &rdev->flags),
  7013. msecs_to_jiffies(5000));
  7014. rdev_dec_pending(rdev, mddev);
  7015. }
  7016. EXPORT_SYMBOL(md_wait_for_blocked_rdev);
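/*
 * Usage sketch (hypothetical caller, not from md.c): a personality that
 * finds a Blocked rdev in its write path takes a reference and parks
 * here until the metadata handler clears the flag. Note that the
 * function above drops the nr_pending reference itself on return.
 */
#if 0
static void myraid_wait_unblock(struct mddev *mddev, struct md_rdev *rdev)
{
	if (test_bit(Blocked, &rdev->flags)) {
		atomic_inc(&rdev->nr_pending);	/* dropped inside the call */
		md_wait_for_blocked_rdev(rdev, mddev);
	}
}
#endif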
  7017. void md_finish_reshape(struct mddev *mddev)
  7018. {
7019. /* called by the personality module when a reshape completes. */
  7020. struct md_rdev *rdev;
  7021. rdev_for_each(rdev, mddev) {
  7022. if (rdev->data_offset > rdev->new_data_offset)
  7023. rdev->sectors += rdev->data_offset - rdev->new_data_offset;
  7024. else
  7025. rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
  7026. rdev->data_offset = rdev->new_data_offset;
  7027. }
  7028. }
  7029. EXPORT_SYMBOL(md_finish_reshape);

/* Bad block management.
 * We can record which blocks on each device are 'bad' and so just
 * fail those blocks, or that stripe, rather than the whole device.
 * Entries in the bad-block table are 64bits wide.  This comprises:
 * Length of bad-range, in sectors: 0-511 for lengths 1-512
 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
 *  A 'shift' can be set so that larger blocks are tracked and
 *  consequently larger devices can be covered.
 * 'Acknowledged' flag - 1 bit. - the most significant bit.
 *
 * Locking of the bad-block table uses a seqlock so md_is_badblock
 * might need to retry if it is very unlucky.
 * We will sometimes want to check for bad blocks in a bi_end_io function,
 * so we use the write_seqlock_irq variant.
 *
 * When looking for a bad block we specify a range and want to
 * know if any block in the range is bad.  So we binary-search
 * to the last range that starts at-or-before the given endpoint,
 * (or "before the sector after the target range")
 * then see if it ends after the given start.
 * We return
 *  0 if there are no known bad blocks in the range
 *  1 if there are known bad blocks which are all acknowledged
 * -1 if there are bad blocks which have not yet been acknowledged in metadata.
 * plus the start/length of the first bad section we overlap.
 */
int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
		   sector_t *first_bad, int *bad_sectors)
{
	int hi;
	int lo = 0;
	u64 *p = bb->page;
	int rv = 0;
	sector_t target = s + sectors;
	unsigned seq;

	if (bb->shift > 0) {
		/* round the start down, and the end up */
		s >>= bb->shift;
		target += (1<<bb->shift) - 1;
		target >>= bb->shift;
		sectors = target - s;
	}
	/* 'target' is now the first block after the bad range */

retry:
	seq = read_seqbegin(&bb->lock);

	hi = bb->count;

	/* Binary search between lo and hi for 'target'
	 * i.e. for the last range that starts before 'target'
	 */
	/* INVARIANT: ranges before 'lo' and at-or-after 'hi'
	 * are known not to be the last range before target.
	 * VARIANT: hi-lo is the number of possible
	 * ranges, and decreases until it reaches 1
	 */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a < target)
			/* This could still be the one, earlier ranges
			 * could not. */
			lo = mid;
		else
			/* This and later ranges are definitely out. */
			hi = mid;
	}
	/* 'lo' might be the last that started before target, but 'hi' isn't */
	if (hi > lo) {
		/* need to check all ranges that end after 's' to see if
		 * any are unacknowledged.
		 */
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			if (BB_OFFSET(p[lo]) < target) {
				/* starts before the end, and finishes after
				 * the start, so they must overlap
				 */
				if (rv != -1 && BB_ACK(p[lo]))
					rv = 1;
				else
					rv = -1;
				*first_bad = BB_OFFSET(p[lo]);
				*bad_sectors = BB_LEN(p[lo]);
			}
			lo--;
		}
	}

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return rv;
}
EXPORT_SYMBOL_GPL(md_is_badblock);
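
/*
 * For reference, the 64-bit entry layout described above is implemented
 * by macros in md.h along these lines (illustrative sketch; see md.h
 * for the authoritative definitions):
 *
 *	#define BB_LEN_MASK	(0x00000000000001FFULL)
 *	#define BB_OFFSET_MASK	(0x7FFFFFFFFFFFFE00ULL)
 *	#define BB_ACK_MASK	(0x8000000000000000ULL)
 *	#define BB_MAX_LEN	512
 *	#define BB_OFFSET(x)	(((x) & BB_OFFSET_MASK) >> 9)
 *	#define BB_LEN(x)	(((x) & BB_LEN_MASK) + 1)
 *	#define BB_ACK(x)	(!!((x) & BB_ACK_MASK))
 *	#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))
 *
 * e.g. BB_MAKE(4096, 8, 1) encodes an acknowledged 8-sector bad range
 * starting at (shifted) sector 4096.
 */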

/*
 * Add a range of bad blocks to the table.
 * This might extend the table, or might contract it
 * if two adjacent ranges can be merged.
 * We binary-search to find the 'insertion' point, then
 * decide how best to handle it.
 */
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged)
{
	u64 *p;
	int lo, hi;
	int rv = 1;

	if (bb->shift < 0)
		/* badblocks are disabled */
		return 0;

	if (bb->shift) {
		/* round the start down, and the end up */
		sector_t next = s + sectors;
		s >>= bb->shift;
		next += (1<<bb->shift) - 1;
		next >>= bb->shift;
		sectors = next - s;
	}

	write_seqlock_irq(&bb->lock);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts at-or-before 's' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a <= s)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo && BB_OFFSET(p[lo]) > s)
		hi = lo;

	if (hi > lo) {
		/* we found a range that might merge with the start
		 * of our new range
		 */
		sector_t a = BB_OFFSET(p[lo]);
		sector_t e = a + BB_LEN(p[lo]);
		int ack = BB_ACK(p[lo]);
		if (e >= s) {
			/* Yes, we can merge with a previous range */
			if (s == a && s + sectors >= e)
				/* new range covers old */
				ack = acknowledged;
			else
				ack = ack && acknowledged;

			if (e < s + sectors)
				e = s + sectors;
			if (e - a <= BB_MAX_LEN) {
				p[lo] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				/* does not all fit in one range,
				 * make p[lo] maximal
				 */
				if (BB_LEN(p[lo]) != BB_MAX_LEN)
					p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
		}
	}
	if (sectors && hi < bb->count) {
		/* 'hi' points to the first range that starts after 's'.
		 * Maybe we can merge with the start of that range */
		sector_t a = BB_OFFSET(p[hi]);
		sector_t e = a + BB_LEN(p[hi]);
		int ack = BB_ACK(p[hi]);
		if (a <= s + sectors) {
			/* merging is possible */
			if (e <= s + sectors) {
				/* full overlap */
				e = s + sectors;
				ack = acknowledged;
			} else
				ack = ack && acknowledged;

			a = s;
			if (e - a <= BB_MAX_LEN) {
				p[hi] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
			lo = hi;
			hi++;
		}
	}
	if (sectors == 0 && hi < bb->count) {
		/* we might be able to combine lo and hi */
		/* Note: 's' is at the end of 'lo' */
		sector_t a = BB_OFFSET(p[hi]);
		int lolen = BB_LEN(p[lo]);
		int hilen = BB_LEN(p[hi]);
		int newlen = lolen + hilen - (s - a);
		if (s >= a && newlen < BB_MAX_LEN) {
			/* yes, we can combine them */
			int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
			p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
			memmove(p + hi, p + hi + 1,
				(bb->count - hi - 1) * 8);
			bb->count--;
		}
	}
	while (sectors) {
		/* didn't merge (it all).
		 * Need to add a range just before 'hi' */
		if (bb->count >= MD_MAX_BADBLOCKS) {
			/* No room for more */
			rv = 0;
			break;
		} else {
			int this_sectors = sectors;
			memmove(p + hi + 1, p + hi,
				(bb->count - hi) * 8);
			bb->count++;

			if (this_sectors > BB_MAX_LEN)
				this_sectors = BB_MAX_LEN;
			p[hi] = BB_MAKE(s, this_sectors, acknowledged);
			sectors -= this_sectors;
			s += this_sectors;
		}
	}

	bb->changed = 1;
	if (!acknowledged)
		bb->unacked_exist = 1;
	write_sequnlock_irq(&bb->lock);

	return rv;
}
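
/*
 * Worked example (hypothetical numbers): if the table already holds the
 * acknowledged range (start 100, len 10), then
 * md_set_badblocks(bb, 105, 10, 0) merges into that entry, leaving
 * (start 100, len 15) -- and the result is unacknowledged, since a
 * merged range is only 'ack' when both the old entry and the new
 * request were.
 */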
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
		       int is_new)
{
	int rv;
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	rv = md_set_badblocks(&rdev->badblocks,
			      s, sectors, 0);
	if (rv) {
		/* Make sure they get written out promptly */
		sysfs_notify_dirent_safe(rdev->sysfs_state);
		set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
		md_wakeup_thread(rdev->mddev->thread);
	}
	return rv;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);

/*
 * Remove a range of bad blocks from the table.
 * This may involve extending the table if we split a region,
 * but it must not fail.  So if the table becomes full, we just
 * drop the remove request.
 */
static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
{
	u64 *p;
	int lo, hi;
	sector_t target = s + sectors;
	int rv = 0;

	if (bb->shift > 0) {
		/* When clearing we round the start up and the end down.
		 * This should not matter as the shift should align with
		 * the block size and no rounding should ever be needed.
		 * However it is better to think a block is bad when it
		 * isn't than to think a block is not bad when it is.
		 */
		s += (1<<bb->shift) - 1;
		s >>= bb->shift;
		target >>= bb->shift;
		sectors = target - s;
	}

	write_seqlock_irq(&bb->lock);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts before 'target' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a < target)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo) {
		/* p[lo] is the last range that could overlap the
		 * current range.  Earlier ranges could also overlap,
		 * but only this one can overlap the end of the range.
		 */
		if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
			/* Partial overlap, leave the tail of this range */
			int ack = BB_ACK(p[lo]);
			sector_t a = BB_OFFSET(p[lo]);
			sector_t end = a + BB_LEN(p[lo]);

			if (a < s) {
				/* we need to split this range */
				if (bb->count >= MD_MAX_BADBLOCKS) {
					rv = 0;
					goto out;
				}
				memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
				bb->count++;
				p[lo] = BB_MAKE(a, s-a, ack);
				lo++;
			}
			p[lo] = BB_MAKE(target, end - target, ack);
			/* there is no longer an overlap */
			hi = lo;
			lo--;
		}
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			/* This range does overlap */
			if (BB_OFFSET(p[lo]) < s) {
				/* Keep the early parts of this range. */
				int ack = BB_ACK(p[lo]);
				sector_t start = BB_OFFSET(p[lo]);
				p[lo] = BB_MAKE(start, s - start, ack);
				/* now 'lo' doesn't overlap, so we're done */
				break;
			}
			lo--;
		}
		/* 'lo' is strictly before, 'hi' is strictly after,
		 * anything between needs to be discarded
		 */
		if (hi - lo > 1) {
			memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
			bb->count -= (hi - lo - 1);
		}
	}

	bb->changed = 1;
out:
	write_sequnlock_irq(&bb->lock);
	return rv;
}
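
/*
 * Worked example (hypothetical numbers): clearing (start 105, len 5)
 * from a table holding (start 100, len 20) splits that entry into
 * (start 100, len 5) and (start 110, len 10) -- which is why a clear
 * can consume a table slot and must be dropped when the table is full.
 */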
int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			 int is_new)
{
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	return md_clear_badblocks(&rdev->badblocks,
				  s, sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

/*
 * Acknowledge all bad blocks in a list.
 * This only succeeds if ->changed is clear.  It is used by
 * in-kernel metadata updates.
 */
void md_ack_all_badblocks(struct badblocks *bb)
{
	if (bb->page == NULL || bb->changed)
		/* no point even trying */
		return;
	write_seqlock_irq(&bb->lock);

	if (bb->changed == 0 && bb->unacked_exist) {
		u64 *p = bb->page;
		int i;
		for (i = 0; i < bb->count ; i++) {
			if (!BB_ACK(p[i])) {
				sector_t start = BB_OFFSET(p[i]);
				int len = BB_LEN(p[i]);
				p[i] = BB_MAKE(start, len, 1);
			}
		}
		bb->unacked_exist = 0;
	}
	write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(md_ack_all_badblocks);

/* sysfs access to bad-blocks list.
 * We present two files.
 * 'bad-blocks' lists sector numbers and lengths of ranges that
 *    are recorded as bad.  The list is truncated to fit within
 *    the one-page limit of sysfs.
 *    Writing "sector length" to this file adds an acknowledged
 *    bad range to the list.
 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
 *    been acknowledged.  Writing to this file adds bad blocks
 *    without acknowledging them.  This is largely for testing.
 */
static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack)
{
	size_t len;
	int i;
	u64 *p = bb->page;
	unsigned seq;

	if (bb->shift < 0)
		return 0;

retry:
	seq = read_seqbegin(&bb->lock);

	len = 0;
	i = 0;

	while (len < PAGE_SIZE && i < bb->count) {
		sector_t s = BB_OFFSET(p[i]);
		unsigned int length = BB_LEN(p[i]);
		int ack = BB_ACK(p[i]);
		i++;

		if (unack && ack)
			continue;

		len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
				(unsigned long long)s << bb->shift,
				length << bb->shift);
	}
	if (unack && len == 0)
		bb->unacked_exist = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return len;
}
#define DO_DEBUG 1

static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
{
	unsigned long long sector;
	int length;
	char newline;
#ifdef DO_DEBUG
	/* Allow clearing via sysfs *only* for testing/debugging.
	 * Normally only a successful write may clear a badblock
	 */
	int clear = 0;
	if (page[0] == '-') {
		clear = 1;
		page++;
	}
#endif /* DO_DEBUG */

	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
	case 3:
		if (newline != '\n')
			return -EINVAL;
		/* fall through */
	case 2:
		if (length <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

#ifdef DO_DEBUG
	if (clear) {
		md_clear_badblocks(bb, sector, length);
		return len;
	}
#endif /* DO_DEBUG */
	if (md_set_badblocks(bb, sector, length, !unack))
		return len;
	else
		return -ENOSPC;
}
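
/*
 * Example (illustrative): writing "1060 5\n" to the bad-blocks file
 * records an acknowledged 5-sector bad range starting at sector 1060;
 * with DO_DEBUG defined, writing "-1060 5\n" clears it again.
 */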
static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	struct mddev *mddev;
	int need_delay = 0;

	for_each_mddev(mddev, tmp) {
		if (mddev_trylock(mddev)) {
			if (mddev->pers)
				__md_stop_writes(mddev);
			mddev->safemode = 2;
			mddev_unlock(mddev);
		}
		need_delay = 1;
	}
	/*
	 * certain more exotic SCSI devices are known to be
	 * volatile wrt too early system reboots.  While the
	 * right place to handle this issue is the given
	 * driver, we do want to have a safe RAID driver ...
	 */
	if (need_delay)
		mdelay(1000*1);

	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
		goto err_md;

	if ((ret = register_blkdev(0, "mdp")) < 0)
		goto err_mdp;
	mdp_major = ret;

	blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}

static void autostart_arrays(int part)
{
	struct md_rdev *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev, 0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
		i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */

static __exit void md_exit(void)
{
	struct mddev *mddev;
	struct list_head *tmp;

	blk_unregister_region(MKDEV(MD_MAJOR, 0), 1U << MINORBITS);
	blk_unregister_region(MKDEV(mdp_major, 0), 1U << MINORBITS);

	unregister_blkdev(MD_MAJOR, "md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->hold_active = 0;
	}
	destroy_workqueue(md_misc_wq);
	destroy_workqueue(md_wq);
}

subsys_initcall(md_init);
module_exit(md_exit)

static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}
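
/*
 * Usage note (illustrative): start_ro can be set at boot or module load
 * time, e.g. "md_mod.start_ro=1" on the kernel command line, to make
 * newly started arrays come up auto-read-only until the first write
 * arrives.
 */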
module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);

module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);

EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);