/*
 * md.c : Multiple Devices driver for Linux
 *	Copyright (C) 1998, 1999, 2000 Ingo Molnar
 *	completely rewritten, based on the MD driver code from Marc Zyngier
 *
 * Changes:
 * - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
 * - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
 * - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
 * - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
 * - kmod support by: Cyrus Durgin
 * - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
 * - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
 * - lots of fixes and improvements to the RAID1/RAID5 and generic
 *   RAID code (such as request based resynchronization):
 *   Neil Brown <neilb@cse.unsw.edu.au>.
 * - persistent bitmap code
 *   Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"

#ifndef MODULE
static void autostart_arrays(int part);
#endif

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
                                 struct md_rdev *this);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_{min,max}
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
        return mddev->sync_speed_min ?
                mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
        return mddev->sync_speed_max ?
                mddev->sync_speed_max : sysctl_speed_limit_max;
}
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
        {
                .procname       = "speed_limit_min",
                .data           = &sysctl_speed_limit_min,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "speed_limit_max",
                .data           = &sysctl_speed_limit_max,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
                .proc_handler   = proc_dointvec,
        },
        { }
};

static ctl_table raid_dir_table[] = {
        {
                .procname       = "raid",
                .maxlen         = 0,
                .mode           = S_IRUGO|S_IXUGO,
                .child          = raid_table,
        },
        { }
};

static ctl_table raid_root_table[] = {
        {
                .procname       = "dev",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = raid_dir_table,
        },
        { }
};
static const struct block_device_operations md_fops;

static int start_readonly;

/* bio_alloc_mddev / bio_clone_mddev
 * like bio_alloc / bio_clone, but with a local bio set
 */

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                            struct mddev *mddev)
{
        struct bio *b;

        if (!mddev || !mddev->bio_set)
                return bio_alloc(gfp_mask, nr_iovecs);

        b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
        if (!b)
                return NULL;
        return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
                            struct mddev *mddev)
{
        if (!mddev || !mddev->bio_set)
                return bio_clone(bio, gfp_mask);

        return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);
void md_trim_bio(struct bio *bio, int offset, int size)
{
        /* 'bio' is a cloned bio which we need to trim to match
         * the given offset and size.
         * This requires adjusting bi_sector, bi_size, and bi_io_vec
         */
        int i;
        struct bio_vec *bvec;
        int sofar = 0;

        size <<= 9;
        if (offset == 0 && size == bio->bi_size)
                return;

        clear_bit(BIO_SEG_VALID, &bio->bi_flags);

        bio_advance(bio, offset << 9);

        bio->bi_size = size;

        /* avoid any complications with bi_idx being non-zero */
        if (bio->bi_idx) {
                memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
                        (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
                bio->bi_vcnt -= bio->bi_idx;
                bio->bi_idx = 0;
        }
        /* Make sure vcnt and last bv are not too big */
        bio_for_each_segment(bvec, bio, i) {
                if (sofar + bvec->bv_len > size)
                        bvec->bv_len = size - sofar;
                if (bvec->bv_len == 0) {
                        bio->bi_vcnt = i;
                        break;
                }
                sofar += bvec->bv_len;
        }
}
EXPORT_SYMBOL_GPL(md_trim_bio);
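
/*
 * Usage sketch (editorial illustration, not part of the original file;
 * the function name and parameters are hypothetical): a personality such
 * as raid1 clones an incoming bio with the mddev-local bio set, then
 * trims the clone to the sub-range one member device should see.
 */
static void __maybe_unused example_clone_and_trim(struct mddev *mddev,
                                                  struct bio *bio,
                                                  int offset, int sectors)
{
        struct bio *clone = bio_clone_mddev(bio, GFP_NOIO, mddev);

        if (!clone)
                return;
        /* keep 'sectors' sectors of the clone, starting 'offset' in */
        md_trim_bio(clone, offset, sectors);
        /* ... submit 'clone' to a member device ... */
}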
/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
        atomic_inc(&md_event_count);
        wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(struct mddev *mddev)
{
        atomic_inc(&md_event_count);
        wake_up(&md_event_waiters);
}
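
/*
 * Userspace view (illustrative sketch, compiled out): the counter above
 * is what makes poll(2) on /proc/mdstat work.  A monitor reads the file
 * once, then polls; each md_new_event() bumps md_event_count and wakes
 * md_event_waiters, so the poll returns and the file can be re-read.
 */
#if 0	/* userspace example, not kernel code */
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int wait_for_md_event(void)
{
        char buf[4096];
        int fd = open("/proc/mdstat", O_RDONLY);
        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };

        if (fd < 0)
                return -1;
        read(fd, buf, sizeof(buf));	/* consume current contents */
        poll(&pfd, 1, -1);		/* sleeps until the event count changes */
        close(fd);
        return 0;
}
#endif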
/*
 * Enables iteration over all existing md arrays;
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it
 * (see the usage sketch after mddev_put() below).
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
        for (({ spin_lock(&all_mddevs_lock);				\
                _tmp = all_mddevs.next;					\
                _mddev = NULL;});					\
             ({ if (_tmp != &all_mddevs)				\
                        mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
                spin_unlock(&all_mddevs_lock);				\
                if (_mddev) mddev_put(_mddev);				\
                _mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
                _tmp != &all_mddevs;});					\
             ({ spin_lock(&all_mddevs_lock);				\
                _tmp = _tmp->next;})					\
                )
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static void md_make_request(struct request_queue *q, struct bio *bio)
{
        const int rw = bio_data_dir(bio);
        struct mddev *mddev = q->queuedata;
        int cpu;
        unsigned int sectors;

        if (mddev == NULL || mddev->pers == NULL
            || !mddev->ready) {
                bio_io_error(bio);
                return;
        }
        if (mddev->ro == 1 && unlikely(rw == WRITE)) {
                bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
                return;
        }
        smp_rmb(); /* Ensure implications of 'active' are visible */
        rcu_read_lock();
        if (mddev->suspended) {
                DEFINE_WAIT(__wait);
                for (;;) {
                        prepare_to_wait(&mddev->sb_wait, &__wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (!mddev->suspended)
                                break;
                        rcu_read_unlock();
                        schedule();
                        rcu_read_lock();
                }
                finish_wait(&mddev->sb_wait, &__wait);
        }
        atomic_inc(&mddev->active_io);
        rcu_read_unlock();

        /*
         * save the sectors now since our bio can
         * go away inside make_request
         */
        sectors = bio_sectors(bio);
        mddev->pers->make_request(mddev, bio);

        cpu = part_stat_lock();
        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
        part_stat_unlock();

        if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
                wake_up(&mddev->sb_wait);
}
/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once ->stop is called and completes, the module will be completely
 * unused.
 */
void mddev_suspend(struct mddev *mddev)
{
        BUG_ON(mddev->suspended);
        mddev->suspended = 1;
        synchronize_rcu();
        wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
        mddev->pers->quiesce(mddev, 1);

        del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
        mddev->suspended = 0;
        wake_up(&mddev->sb_wait);
        mddev->pers->quiesce(mddev, 0);

        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
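
/*
 * Usage sketch (illustrative, hypothetical function name): the pair above
 * brackets a reconfiguration.  Between the two calls no new I/O is
 * admitted by md_make_request() and all in-flight I/O has drained, so the
 * personality's data structures can be changed safely.
 */
static void __maybe_unused example_reconfigure(struct mddev *mddev)
{
        mddev_suspend(mddev);
        /* ... change geometry/parameters while the array is quiescent ... */
        mddev_resume(mddev);
}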
int mddev_congested(struct mddev *mddev, int bits)
{
        return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);
/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio, int err)
{
        struct md_rdev *rdev = bio->bi_private;
        struct mddev *mddev = rdev->mddev;

        rdev_dec_pending(rdev, mddev);

        if (atomic_dec_and_test(&mddev->flush_pending)) {
                /* The pre-request flush has finished */
                queue_work(md_wq, &mddev->flush_work);
        }
        bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
        struct mddev *mddev = container_of(ws, struct mddev, flush_work);
        struct md_rdev *rdev;

        INIT_WORK(&mddev->flush_work, md_submit_flush_data);
        atomic_set(&mddev->flush_pending, 1);
        rcu_read_lock();
        rdev_for_each_rcu(rdev, mddev)
                if (rdev->raid_disk >= 0 &&
                    !test_bit(Faulty, &rdev->flags)) {
                        /* Take two references, one is dropped
                         * when request finishes, one after
                         * we reclaim rcu_read_lock
                         */
                        struct bio *bi;
                        atomic_inc(&rdev->nr_pending);
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
                        bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
                        bi->bi_end_io = md_end_flush;
                        bi->bi_private = rdev;
                        bi->bi_bdev = rdev->bdev;
                        atomic_inc(&mddev->flush_pending);
                        submit_bio(WRITE_FLUSH, bi);
                        rcu_read_lock();
                        rdev_dec_pending(rdev, mddev);
                }
        rcu_read_unlock();
        if (atomic_dec_and_test(&mddev->flush_pending))
                queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
        struct mddev *mddev = container_of(ws, struct mddev, flush_work);
        struct bio *bio = mddev->flush_bio;

        if (bio->bi_size == 0)
                /* an empty barrier - all done */
                bio_endio(bio, 0);
        else {
                bio->bi_rw &= ~REQ_FLUSH;
                mddev->pers->make_request(mddev, bio);
        }

        mddev->flush_bio = NULL;
        wake_up(&mddev->sb_wait);
}
void md_flush_request(struct mddev *mddev, struct bio *bio)
{
        spin_lock_irq(&mddev->write_lock);
        wait_event_lock_irq(mddev->sb_wait,
                            !mddev->flush_bio,
                            mddev->write_lock);
        mddev->flush_bio = bio;
        spin_unlock_irq(&mddev->write_lock);

        INIT_WORK(&mddev->flush_work, submit_flushes);
        queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
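
/*
 * Usage sketch (illustrative, hypothetical function name): a personality
 * that cannot handle REQ_FLUSH itself hands the bio back to the generic
 * path above from its make_request method, in the style of raid0/linear.
 */
static void __maybe_unused example_make_request(struct mddev *mddev,
                                                struct bio *bio)
{
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }
        /* ... map and submit the data portion of 'bio' ... */
}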
void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
        struct mddev *mddev = cb->data;
        md_wakeup_thread(mddev->thread);
        kfree(cb);
}
EXPORT_SYMBOL(md_unplug);
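
/*
 * Usage sketch (illustrative, hypothetical function name): md_unplug is
 * meant to be registered as a block-plug callback so a personality can
 * batch work until the submitting task unplugs; md.h wraps this pattern
 * as mddev_check_plugged().
 */
static bool __maybe_unused example_is_plugged(struct mddev *mddev)
{
        return blk_check_plugged(md_unplug, mddev,
                                 sizeof(struct blk_plug_cb)) != NULL;
}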
static inline struct mddev *mddev_get(struct mddev *mddev)
{
        atomic_inc(&mddev->active);
        return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
        struct bio_set *bs = NULL;

        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
        if (!mddev->raid_disks && list_empty(&mddev->disks) &&
            mddev->ctime == 0 && !mddev->hold_active) {
                /* Array is not configured at all, and not held active,
                 * so destroy it */
                list_del_init(&mddev->all_mddevs);
                bs = mddev->bio_set;
                mddev->bio_set = NULL;
                if (mddev->gendisk) {
                        /* We did a probe so need to clean up.  Call
                         * queue_work inside the spinlock so that
                         * flush_workqueue() after mddev_find will
                         * succeed in waiting for the work to be done.
                         */
                        INIT_WORK(&mddev->del_work, mddev_delayed_delete);
                        queue_work(md_misc_wq, &mddev->del_work);
                } else
                        kfree(mddev);
        }
        spin_unlock(&all_mddevs_lock);
        if (bs)
                bioset_free(bs);
}
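
/*
 * Usage sketch (illustrative, hypothetical function name) for the
 * for_each_mddev() macro defined earlier: the body runs holding only the
 * per-mddev reference taken by mddev_get(), and breaking out early leaves
 * that reference with the caller, which must mddev_put() it.
 */
static void __maybe_unused example_walk_arrays(void)
{
        struct mddev *mddev;
        struct list_head *tmp;

        for_each_mddev(mddev, tmp) {
                if (mddev->pers)
                        pr_debug("md: %s is active\n", mdname(mddev));
        }
}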
void mddev_init(struct mddev *mddev)
{
        mutex_init(&mddev->open_mutex);
        mutex_init(&mddev->reconfig_mutex);
        mutex_init(&mddev->bitmap_info.mutex);
        INIT_LIST_HEAD(&mddev->disks);
        INIT_LIST_HEAD(&mddev->all_mddevs);
        init_timer(&mddev->safemode_timer);
        atomic_set(&mddev->active, 1);
        atomic_set(&mddev->openers, 0);
        atomic_set(&mddev->active_io, 0);
        spin_lock_init(&mddev->write_lock);
        atomic_set(&mddev->flush_pending, 0);
        init_waitqueue_head(&mddev->sb_wait);
        init_waitqueue_head(&mddev->recovery_wait);
        mddev->reshape_position = MaxSector;
        mddev->reshape_backwards = 0;
        mddev->last_sync_action = "none";
        mddev->resync_min = 0;
        mddev->resync_max = MaxSector;
        mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);
static struct mddev * mddev_find(dev_t unit)
{
        struct mddev *mddev, *new = NULL;

        if (unit && MAJOR(unit) != MD_MAJOR)
                unit &= ~((1<<MdpMinorShift)-1);

 retry:
        spin_lock(&all_mddevs_lock);

        if (unit) {
                list_for_each_entry(mddev, &all_mddevs, all_mddevs)
                        if (mddev->unit == unit) {
                                mddev_get(mddev);
                                spin_unlock(&all_mddevs_lock);
                                kfree(new);
                                return mddev;
                        }

                if (new) {
                        list_add(&new->all_mddevs, &all_mddevs);
                        spin_unlock(&all_mddevs_lock);
                        new->hold_active = UNTIL_IOCTL;
                        return new;
                }
        } else if (new) {
                /* find an unused unit number */
                static int next_minor = 512;
                int start = next_minor;
                int is_free = 0;
                int dev = 0;
                while (!is_free) {
                        dev = MKDEV(MD_MAJOR, next_minor);
                        next_minor++;
                        if (next_minor > MINORMASK)
                                next_minor = 0;
                        if (next_minor == start) {
                                /* Oh dear, all in use. */
                                spin_unlock(&all_mddevs_lock);
                                kfree(new);
                                return NULL;
                        }

                        is_free = 1;
                        list_for_each_entry(mddev, &all_mddevs, all_mddevs)
                                if (mddev->unit == dev) {
                                        is_free = 0;
                                        break;
                                }
                }
                new->unit = dev;
                new->md_minor = MINOR(dev);
                new->hold_active = UNTIL_STOP;
                list_add(&new->all_mddevs, &all_mddevs);
                spin_unlock(&all_mddevs_lock);
                return new;
        }
        spin_unlock(&all_mddevs_lock);

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;

        new->unit = unit;
        if (MAJOR(unit) == MD_MAJOR)
                new->md_minor = MINOR(unit);
        else
                new->md_minor = MINOR(unit) >> MdpMinorShift;

        mddev_init(new);

        goto retry;
}
static inline int mddev_lock(struct mddev * mddev)
{
        return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(struct mddev *mddev)
{
        return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev * mddev)
{
        return mutex_trylock(&mddev->reconfig_mutex);
}

static struct attribute_group md_redundancy_group;
static void mddev_unlock(struct mddev * mddev)
{
        if (mddev->to_remove) {
                /* These cannot be removed under reconfig_mutex as
                 * an access to the files will try to take reconfig_mutex
                 * while holding the file unremovable, which leads to
                 * a deadlock.
                 * So we set sysfs_active while the remove is happening,
                 * and anything else which might set ->to_remove or may
                 * otherwise change the sysfs namespace will fail with
                 * -EBUSY if sysfs_active is still set.
                 * We set sysfs_active under reconfig_mutex and elsewhere
                 * test it under the same mutex to ensure its correct value
                 * is seen.
                 */
                struct attribute_group *to_remove = mddev->to_remove;
                mddev->to_remove = NULL;
                mddev->sysfs_active = 1;
                mutex_unlock(&mddev->reconfig_mutex);

                if (mddev->kobj.sd) {
                        if (to_remove != &md_redundancy_group)
                                sysfs_remove_group(&mddev->kobj, to_remove);
                        if (mddev->pers == NULL ||
                            mddev->pers->sync_request == NULL) {
                                sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
                                if (mddev->sysfs_action)
                                        sysfs_put(mddev->sysfs_action);
                                mddev->sysfs_action = NULL;
                        }
                }
                mddev->sysfs_active = 0;
        } else
                mutex_unlock(&mddev->reconfig_mutex);

        /* As we've dropped the mutex we need a spinlock to
         * make sure the thread doesn't disappear
         */
        spin_lock(&pers_lock);
        md_wakeup_thread(mddev->thread);
        spin_unlock(&pers_lock);
}
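
/*
 * Usage sketch (illustrative, hypothetical function name): the pattern
 * used by sysfs store handlers and ioctls - take reconfig_mutex
 * interruptibly, reconfigure, and let mddev_unlock() perform any deferred
 * sysfs group removal and wake the per-array thread.
 */
static int __maybe_unused example_reconfig_op(struct mddev *mddev)
{
        int err = mddev_lock(mddev);

        if (err)
                return err;
        /* ... modify the array under reconfig_mutex ... */
        mddev_unlock(mddev);
        return 0;
}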
static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr)
{
        struct md_rdev *rdev;

        rdev_for_each(rdev, mddev)
                if (rdev->desc_nr == nr)
                        return rdev;

        return NULL;
}

static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
        struct md_rdev *rdev;

        rdev_for_each_rcu(rdev, mddev)
                if (rdev->desc_nr == nr)
                        return rdev;

        return NULL;
}

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
        struct md_rdev *rdev;

        rdev_for_each(rdev, mddev)
                if (rdev->bdev->bd_dev == dev)
                        return rdev;

        return NULL;
}

static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
        struct md_rdev *rdev;

        rdev_for_each_rcu(rdev, mddev)
                if (rdev->bdev->bd_dev == dev)
                        return rdev;

        return NULL;
}

static struct md_personality *find_pers(int level, char *clevel)
{
        struct md_personality *pers;
        list_for_each_entry(pers, &pers_list, list) {
                if (level != LEVEL_NONE && pers->level == level)
                        return pers;
                if (strcmp(pers->name, clevel)==0)
                        return pers;
        }
        return NULL;
}
/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
        sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
        return MD_NEW_SIZE_SECTORS(num_sectors);
}
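
/*
 * Worked example: MD_NEW_SIZE_SECTORS() (md_p.h) rounds the device size
 * down to a 64KiB boundary and steps back one 64KiB block (128 sectors),
 * since the 0.90 superblock lives in the last aligned 64KiB of the
 * device.  For a 4000000-sector device: (4000000 & ~127) - 128 = 3999872.
 */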
static int alloc_disk_sb(struct md_rdev * rdev)
{
        if (rdev->sb_page)
                MD_BUG();

        rdev->sb_page = alloc_page(GFP_KERNEL);
        if (!rdev->sb_page) {
                printk(KERN_ALERT "md: out of memory.\n");
                return -ENOMEM;
        }

        return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
        if (rdev->sb_page) {
                put_page(rdev->sb_page);
                rdev->sb_loaded = 0;
                rdev->sb_page = NULL;
                rdev->sb_start = 0;
                rdev->sectors = 0;
        }
        if (rdev->bb_page) {
                put_page(rdev->bb_page);
                rdev->bb_page = NULL;
        }
        kfree(rdev->badblocks.page);
        rdev->badblocks.page = NULL;
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio, int error)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);

	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	atomic_inc(&mddev->pending_writes);
	submit_bio(WRITE_FLUSH_FUA, bio);
}

void md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	DEFINE_WAIT(wq);

	for (;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes) == 0)
			break;
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}
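
/*
 * Aside (not part of the driver): the open-coded loop above is the
 * classic expansion of wait_event(); a minimal equivalent sketch is
 *
 *	wait_event(mddev->sb_wait,
 *		   atomic_read(&mddev->pending_writes) == 0);
 *
 * Spelling out prepare_to_wait()/finish_wait() makes explicit that the
 * pending_writes test happens only after the task is queued on sb_wait,
 * which closes the race against super_written()'s wake_up().
 */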

static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion *)bio->bi_private);
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int rw, bool metadata_op)
{
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
	struct completion event;
	int ret;

	rw |= REQ_SYNC;

	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
		rdev->meta_bdev : rdev->bdev;
	if (metadata_op)
		bio->bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio->bi_sector = sector + rdev->new_data_offset;
	else
		bio->bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
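
/*
 * Usage sketch (illustrative only, not driver code): read 4K from the
 * start of an rdev's data area synchronously and test the outcome.
 *
 *	char b[BDEVNAME_SIZE];
 *	struct page *pg = alloc_page(GFP_KERNEL);
 *
 *	if (pg && sync_page_io(rdev, 0, 4096, pg, READ, false))
 *		pr_debug("md: read 4K at data_offset of %s\n",
 *			 bdevname(rdev->bdev, b));
 *
 * With metadata_op false the sector is biased by data_offset (or by
 * new_data_offset mid-reshape); with metadata_op true, by sb_start.
 */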

static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];

	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev, b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1), GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2), GFP_KERNEL);
	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
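
/*
 * md_csum_fold() is the usual ones-complement-style fold: add the two
 * 16-bit halves, then add again in case the first sum carried.  For
 * example, 0xffff0003 -> 0xffff + 0x0003 = 0x10002 -> 0x0002 + 0x1
 * = 0x0003, which fits in 16 bits.
 */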

static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32 *)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum >> 32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}

/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *      The first time, mddev->raid_disks will be 0, and data from
 *      dev should be merged in.  Subsequent calls check that dev
 *      is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *      Update the superblock for rdev with data in mddev
 *      This does not write to disc.
 *
 */

struct super_type {
	char			*name;
	struct module		*owner;
	int			(*load_super)(struct md_rdev *rdev,
					      struct md_rdev *refdev,
					      int minor_version);
	int			(*validate_super)(struct mddev *mddev,
						  struct md_rdev *rdev);
	void			(*sync_super)(struct mddev *mddev,
					      struct md_rdev *rdev);
	unsigned long long	(*rdev_size_change)(struct md_rdev *rdev,
						    sector_t num_sectors);
	int			(*allow_new_offset)(struct md_rdev *rdev,
						    unsigned long long new_offset);
};
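
/*
 * Dispatch sketch (illustrative): callers elsewhere in this file pick
 * a handler through the super_types[] table by metadata major version,
 * e.g.
 *
 *	err = super_types[mddev->major_version].
 *		load_super(rdev, refdev, mddev->minor_version);
 *
 * which is how 0.90 and 1.x superblocks share one calling convention.
 */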

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(struct mddev *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
	       mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

/*
 * load_super for 0.90.0
 */
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret)
		return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
		       sb->major_version, sb->minor_version,
		       b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
		       b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->new_data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;
	rdev->badblocks.shift = -1;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = page_address(refdev->sb_page);

		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}

	rdev->sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * (not needed for Linear and RAID0 as metadata doesn't
	 * record this size)
	 */
	if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
		rdev->sectors = (2ULL << 32) - 2;

	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = ((sector_t)sb->size) * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* bitmap can use 60 K after the 4K superblocks */
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
		mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
		mddev->reshape_backwards = 0;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
			if (mddev->delta_disks < 0)
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12, &sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
			mddev->bitmap_info.space =
				mddev->bitmap_info.default_space;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_super_t *sb;
	struct md_rdev *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active = 0, working = 0, failed = 0, spare = 0, nr_disks = 0;

	rdev->sb_size = MD_SB_BYTES;

	sb = page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12, 4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync) {
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<<MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	rdev_for_each(rdev2, mddev) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i = 0; i < mddev->raid_disks; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * 4TB == 2^32 KB, or 2*2^32 sectors.
	 */
	if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
		num_sectors = (2ULL << 32) - 2;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}
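
/*
 * Arithmetic note (illustrative): (2ULL << 32) sectors is 2^33 * 512
 * bytes == 4TB, the most this metadata can express; the "- 2"
 * presumably keeps the size, counted in 1K units in the superblock's
 * 32-bit size field, at 2^32 - 1 so it still fits.
 */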

static int
super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
{
	/* non-zero offset changes not possible with v0.90 */
	return new_offset == 0;
}

/*
 * version 1 superblock
 */
static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32 *)sb;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (; size >= 4; size -= 4)
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16 *)isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
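
/*
 * Size note (illustrative): the v1 checksum covers the fixed 256-byte
 * header plus two bytes of dev_roles[] per slot, so size is
 * 256 + max_dev * 2.  When max_dev is odd that total is == 2 (mod 4)
 * and the final 16-bit word is picked up by the le16 tail case above.
 */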

static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged);
static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	sector_t sectors;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch (minor_version) {
	case 0:
		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret)
		return ret;

	sb = page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
		       bdevname(rdev->bdev, b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev, b));
		return -EINVAL;
	}
	if (sb->pad0 ||
	    sb->pad3[0] ||
	    memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
		/* Some padding is non-zero, might be a new feature */
		return -EINVAL;

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	rdev->new_data_offset = rdev->data_offset;
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;
	if (minor_version
	    && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!rdev->bb_page) {
		rdev->bb_page = alloc_page(GFP_KERNEL);
		if (!rdev->bb_page)
			return -ENOMEM;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
	    rdev->badblocks.count == 0) {
		/* need to load the bad block list.
		 * Currently we limit it to one page.
		 */
		s32 offset;
		sector_t bb_sector;
		u64 *bbp;
		int i;
		int sectors = le16_to_cpu(sb->bblog_size);
		if (sectors > (PAGE_SIZE / 512))
			return -EINVAL;
		offset = le32_to_cpu(sb->bblog_offset);
		if (offset == 0)
			return -EINVAL;
		bb_sector = (long long)offset;
		if (!sync_page_io(rdev, bb_sector, sectors << 9,
				  rdev->bb_page, READ, true))
			return -EIO;
		bbp = (u64 *)page_address(rdev->bb_page);
		rdev->badblocks.shift = sb->bblog_shift;
		for (i = 0; i < (sectors << (9-3)); i++, bbp++) {
			u64 bb = le64_to_cpu(*bbp);
			int count = bb & (0x3ff);
			u64 sector = bb >> 10;
			sector <<= sb->bblog_shift;
			count <<= sb->bblog_shift;
			if (bb + 1 == 0)
				break;
			if (md_set_badblocks(&rdev->badblocks,
					     sector, count, 1) == 0)
				return -EINVAL;
		}
	} else if (sb->bblog_offset != 0)
		rdev->badblocks.shift = 0;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
			       " superblock to %s\n",
			       bdevname(rdev->bdev, b),
			       bdevname(refdev->bdev, b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version) {
		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
		sectors -= rdev->data_offset;
	} else
		sectors = rdev->sb_start;
	if (sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	return ret;
}

static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* Default location for bitmap is 1K after superblock
		 * using 3K - total of 4K
		 */
		mddev->bitmap_info.default_offset = 1024 >> 9;
		mddev->bitmap_info.default_space = (4096-1024) >> 9;
		mddev->reshape_backwards = 0;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks = (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);
			/* Metadata doesn't record how much space is available.
			 * For 1.0, we assume we can use up to the superblock
			 * if before, else to 4K beyond superblock.
			 * For others, assume no change is possible.
			 */
			if (mddev->minor_version > 0)
				mddev->bitmap_info.space = 0;
			else if (mddev->bitmap_info.offset > 0)
				mddev->bitmap_info.space =
					8 - mddev->bitmap_info.offset;
			else
				mddev->bitmap_info.space =
					-mddev->bitmap_info.offset;
		}

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
			if (mddev->delta_disks < 0 ||
			    (mddev->delta_disks == 0 &&
			     (le32_to_cpu(sb->feature_map)
			      & MD_FEATURE_RESHAPE_BACKWARDS)))
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}
	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch (role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
			set_bit(Replacement, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}

static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb;
	struct md_rdev *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (test_bit(WriteMostly, &rdev->flags))
		sb->devflags |= WriteMostly1;
	else
		sb->devflags &= ~WriteMostly1;
	sb->data_offset = cpu_to_le64(rdev->data_offset);
	sb->data_size = cpu_to_le64(rdev->sectors);

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
	}
	if (test_bit(Replacement, &rdev->flags))
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_REPLACEMENT);

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
		if (mddev->delta_disks == 0 &&
		    mddev->reshape_backwards)
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
		if (rdev->new_data_offset != rdev->data_offset) {
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
			sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
							     - rdev->data_offset));
		}
	}

	if (rdev->badblocks.count == 0)
		/* Nothing to do for bad blocks*/ ;
	else if (sb->bblog_offset == 0)
		/* Cannot record bad blocks on this device */
		md_error(mddev, rdev);
	else {
		struct badblocks *bb = &rdev->badblocks;
		u64 *bbp = (u64 *)page_address(rdev->bb_page);
		u64 *p = bb->page;
		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
		if (bb->changed) {
			unsigned seq;

retry:
			seq = read_seqbegin(&bb->lock);

			memset(bbp, 0xff, PAGE_SIZE);

			for (i = 0; i < bb->count; i++) {
				u64 internal_bb = p[i];
				u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
						| BB_LEN(internal_bb));
				bbp[i] = cpu_to_le64(store_bb);
			}
			bb->changed = 0;
			if (read_seqretry(&bb->lock, seq))
				goto retry;

			bb->sector = (rdev->sb_start +
				      (int)le32_to_cpu(sb->bblog_offset));
			bb->size = le16_to_cpu(sb->bblog_size);
		}
	}

	max_dev = 0;
	rdev_for_each(rdev2, mddev)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	} else
		max_dev = le32_to_cpu(sb->max_dev);

	for (i = 0; i < max_dev; i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	rdev_for_each(rdev2, mddev) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}

static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;

	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->data_offset != rdev->new_data_offset)
		return 0; /* too confusing */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = cpu_to_le64(rdev->sb_start);
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}

static int
super_1_allow_new_offset(struct md_rdev *rdev,
			 unsigned long long new_offset)
{
	/* All necessary checks on new >= old have been done */
	struct bitmap *bitmap;

	if (new_offset >= rdev->data_offset)
		return 1;

	/* with 1.0 metadata, there is no metadata to tread on
	 * so we can always move back */
	if (rdev->mddev->minor_version == 0)
		return 1;

	/* otherwise we must be sure not to step on
	 * any metadata, so stay:
	 * 36K beyond start of superblock
	 * beyond end of badblocks
	 * beyond write-intent bitmap
	 */
	if (rdev->sb_start + (32+4)*2 > new_offset)
		return 0;
	bitmap = rdev->mddev->bitmap;
	if (bitmap && !rdev->mddev->bitmap_info.file &&
	    rdev->sb_start + rdev->mddev->bitmap_info.offset +
	    bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
		return 0;
	if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
		return 0;

	return 1;
}
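
/*
 * Arithmetic note (illustrative): (32+4)*2 is 72 sectors of 512 bytes,
 * i.e. the 36K quoted in the comment above expressed in 512-byte
 * units.
 */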

static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	  = super_90_load,
		.validate_super	  = super_90_validate,
		.sync_super	  = super_90_sync,
		.rdev_size_change = super_90_rdev_size_change,
		.allow_new_offset = super_90_allow_new_offset,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	  = super_1_load,
		.validate_super	  = super_1_validate,
		.sync_super	  = super_1_sync,
		.rdev_size_change = super_1_rdev_size_change,
		.allow_new_offset = super_1_allow_new_offset,
	},
};

static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
{
	if (mddev->sync_super) {
		mddev->sync_super(mddev, rdev);
		return;
	}

	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));

	super_types[mddev->major_version].sync_super(mddev, rdev);
}

static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
{
	struct md_rdev *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}
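
/*
 * Note (illustrative): bd_contains points at the whole-disk
 * block_device even for a partition, so two arrays built on different
 * partitions of the same disk still compare equal here.
 */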

static LIST_HEAD(pending_raid_disks);

/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(struct mddev *mddev)
{
	struct md_rdev *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
		return 0; /* shouldn't register, or already is */
	rdev_for_each(rdev, mddev) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
					  rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
	if (!reference || !bdev_get_integrity(reference->bdev))
		return 0;
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	if (blk_integrity_register(mddev->gendisk,
				   bdev_get_integrity(reference->bdev)) != 0) {
		printk(KERN_ERR "md: failed to register integrity for %s\n",
		       mdname(mddev));
		return -EINVAL;
	}
	printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
	if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
		printk(KERN_ERR "md: failed to create integrity pool for %s\n",
		       mdname(mddev));
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(md_integrity_register);

/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	struct blk_integrity *bi_rdev;
	struct blk_integrity *bi_mddev;

	if (!mddev->gendisk)
		return;

	bi_rdev = bdev_get_integrity(rdev->bdev);
	bi_mddev = blk_get_integrity(mddev->gendisk);

	if (!bi_mddev) /* nothing to do */
		return;
	if (rdev->raid_disk < 0) /* skip spares */
		return;
	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
					     rdev->bdev->bd_disk) >= 0)
		return;
	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
	blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);

static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			      rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail.
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers)
			choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev, b);
	while ((s = strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if (sysfs_create_link(&rdev->kobj, ko, "block"))
		/* failure here is OK */;
	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_link_disk_holder(rdev->bdev, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled++;

	return 0;

fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}

static void md_delayed_delete(struct work_struct *ws)
{
	struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];

	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev, b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	rdev->badblocks.count = 0;
	/* We need to delay this, otherwise we can deadlock when
	 * writing to 'remove' to "dev/state".  We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	queue_work(md_misc_wq, &rdev->del_work);
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				 shared ? (struct md_rdev *)lock_rdev : rdev);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
		       __bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(struct md_rdev *rdev)
{
	struct block_device *bdev = rdev->bdev;

	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "md: export_rdev(%s)\n",
	       bdevname(rdev->bdev, b));
	if (rdev->mddev)
		MD_BUG();
	md_rdev_clear(rdev);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(struct md_rdev *rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(struct mddev *mddev)
{
	struct md_rdev *rdev, *tmp;

	rdev_for_each_safe(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}

static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
	       desc->major, desc->minor, desc->raid_disk, desc->state);
}

static void print_sb_90(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
	       "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
	       sb->major_version, sb->minor_version, sb->patch_version,
	       sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
	       sb->ctime);
	printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
	       sb->level, sb->size, sb->nr_disks, sb->raid_disks,
	       sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
	       " FD:%d SD:%d CSUM:%08x E:%08lx\n",
	       sb->utime, sb->state, sb->active_disks, sb->working_disks,
	       sb->failed_disks, sb->spare_disks,
	       sb->sb_csum, (unsigned long)sb->events_lo);
	printk(KERN_INFO);
	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk(" D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md: THIS: ");
	print_desc(&sb->this_disk);
}

static void print_sb_1(struct mdp_superblock_1 *sb)
{
	__u8 *uuid;

	uuid = sb->set_uuid;
	printk(KERN_INFO
	       "md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
	       "md: Name: \"%s\" CT:%llu\n",
	       le32_to_cpu(sb->major_version),
	       le32_to_cpu(sb->feature_map),
	       uuid,
	       sb->set_name,
	       (unsigned long long)le64_to_cpu(sb->ctime)
	       & MD_SUPERBLOCK_1_TIME_SEC_MASK);

	uuid = sb->device_uuid;
	printk(KERN_INFO
	       "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
	       " RO:%llu\n"
	       "md: Dev:%08x UUID: %pU\n"
	       "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
	       "md: (MaxDev:%u) \n",
	       le32_to_cpu(sb->level),
	       (unsigned long long)le64_to_cpu(sb->size),
	       le32_to_cpu(sb->raid_disks),
	       le32_to_cpu(sb->layout),
	       le32_to_cpu(sb->chunksize),
	       (unsigned long long)le64_to_cpu(sb->data_offset),
	       (unsigned long long)le64_to_cpu(sb->data_size),
	       (unsigned long long)le64_to_cpu(sb->super_offset),
	       (unsigned long long)le64_to_cpu(sb->recovery_offset),
	       le32_to_cpu(sb->dev_number),
	       uuid,
	       sb->devflags,
	       (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
	       (unsigned long long)le64_to_cpu(sb->events),
	       (unsigned long long)le64_to_cpu(sb->resync_offset),
	       le32_to_cpu(sb->sb_csum),
	       le32_to_cpu(sb->max_dev)
		);
}

static void print_rdev(struct md_rdev *rdev, int major_version)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
	       bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
	       test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
	       rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
		switch (major_version) {
		case 0:
			print_sb_90(page_address(rdev->sb_page));
			break;
		case 1:
			print_sb_1(page_address(rdev->sb_page));
			break;
		}
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}

static void md_print_devices(void)
{
	struct list_head *tmp;
	struct md_rdev *rdev;
	struct mddev *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md: **********************************\n");
	printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md: **********************************\n");
	for_each_mddev(mddev, tmp) {
		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		rdev_for_each(rdev, mddev)
			printk("<%s>", bdevname(rdev->bdev, b));
		printk("\n");

		rdev_for_each(rdev, mddev)
			print_rdev(rdev, mddev->major_version);
	}
	printk("md: **********************************\n");
	printk("\n");
}

static void sync_sbs(struct mddev *mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}

static void md_update_sb(struct mddev *mddev, int force_change)
{
	struct md_rdev *rdev;
	int sync_req;
	int nospares = 0;
	int any_badblocks_changed = 0;

	if (mddev->ro) {
		if (force_change)
			set_bit(MD_CHANGE_DEVS, &mddev->flags);
		return;
	}
repeat:
	/* First make sure individual recovery_offsets are correct */
	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk >= 0 &&
		    mddev->delta_disks >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    mddev->curr_resync_completed > rdev->recovery_offset)
			rdev->recovery_offset = mddev->curr_resync_completed;
	}
	if (!mddev->persistent) {
		clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
		clear_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (!mddev->external) {
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
			rdev_for_each(rdev, mddev) {
				if (rdev->badblocks.changed) {
					rdev->badblocks.changed = 0;
					md_ack_all_badblocks(&rdev->badblocks);
					md_error(mddev, rdev);
				}
				clear_bit(Blocked, &rdev->flags);
				clear_bit(BlockedBadBlocks, &rdev->flags);
				wake_up(&rdev->blocked_wait);
			}
		}
		wake_up(&mddev->sb_wait);
		return;
	}

	spin_lock_irq(&mddev->write_lock);

	mddev->utime = get_seconds();

	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean <-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && mddev->can_decrease_events
	    && mddev->events != 1) {
		mddev->events--;
		mddev->can_decrease_events = 0;
	} else {
		/* otherwise we have to go forward and ... */
		mddev->events++;
		mddev->can_decrease_events = nospares;
	}
  2192. if (!mddev->events) {
  2193. /*
  2194. * oops, this 64-bit counter should never wrap.
2195. * Either we are around the year ~1 trillion A.D., assuming
2196. * 1 reboot per second, or we have a bug:
  2197. */
  2198. MD_BUG();
  2199. mddev->events --;
  2200. }
  2201. rdev_for_each(rdev, mddev) {
  2202. if (rdev->badblocks.changed)
  2203. any_badblocks_changed++;
  2204. if (test_bit(Faulty, &rdev->flags))
  2205. set_bit(FaultRecorded, &rdev->flags);
  2206. }
  2207. sync_sbs(mddev, nospares);
  2208. spin_unlock_irq(&mddev->write_lock);
  2209. pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
  2210. mdname(mddev), mddev->in_sync);
  2211. bitmap_update_sb(mddev->bitmap);
  2212. rdev_for_each(rdev, mddev) {
  2213. char b[BDEVNAME_SIZE];
  2214. if (rdev->sb_loaded != 1)
  2215. continue; /* no noise on spare devices */
  2216. if (!test_bit(Faulty, &rdev->flags) &&
  2217. rdev->saved_raid_disk == -1) {
  2218. md_super_write(mddev,rdev,
  2219. rdev->sb_start, rdev->sb_size,
  2220. rdev->sb_page);
  2221. pr_debug("md: (write) %s's sb offset: %llu\n",
  2222. bdevname(rdev->bdev, b),
  2223. (unsigned long long)rdev->sb_start);
  2224. rdev->sb_events = mddev->events;
  2225. if (rdev->badblocks.size) {
  2226. md_super_write(mddev, rdev,
  2227. rdev->badblocks.sector,
  2228. rdev->badblocks.size << 9,
  2229. rdev->bb_page);
  2230. rdev->badblocks.size = 0;
  2231. }
  2232. } else if (test_bit(Faulty, &rdev->flags))
  2233. pr_debug("md: %s (skipping faulty)\n",
  2234. bdevname(rdev->bdev, b));
  2235. else
  2236. pr_debug("(skipping incremental s/r ");
  2237. if (mddev->level == LEVEL_MULTIPATH)
  2238. /* only need to write one superblock... */
  2239. break;
  2240. }
  2241. md_super_wait(mddev);
  2242. /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
  2243. spin_lock_irq(&mddev->write_lock);
  2244. if (mddev->in_sync != sync_req ||
  2245. test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
  2246. /* have to write it out again */
  2247. spin_unlock_irq(&mddev->write_lock);
  2248. goto repeat;
  2249. }
  2250. clear_bit(MD_CHANGE_PENDING, &mddev->flags);
  2251. spin_unlock_irq(&mddev->write_lock);
  2252. wake_up(&mddev->sb_wait);
  2253. if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
  2254. sysfs_notify(&mddev->kobj, NULL, "sync_completed");
  2255. rdev_for_each(rdev, mddev) {
  2256. if (test_and_clear_bit(FaultRecorded, &rdev->flags))
  2257. clear_bit(Blocked, &rdev->flags);
  2258. if (any_badblocks_changed)
  2259. md_ack_all_badblocks(&rdev->badblocks);
  2260. clear_bit(BlockedBadBlocks, &rdev->flags);
  2261. wake_up(&rdev->blocked_wait);
  2262. }
  2263. }
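/*
 * Illustrative userspace sketch, not part of md.c: a model of the event-
 * counter policy md_update_sb() implements above. All "model_*" names are
 * hypothetical. For a pure clean<->dirty transition the counter may be
 * rolled back instead of pushed forward, so a spare whose sb_events is one
 * behind never needs its superblock rewritten.
 */
#include <stdio.h>

struct model_mddev {
	unsigned long long events;
	int can_decrease_events;
	int in_sync;
};

static void model_update_events(struct model_mddev *m, int nospares)
{
	if (nospares && m->in_sync && m->can_decrease_events && m->events != 1) {
		m->events--;		/* roll back to the previous clean state */
		m->can_decrease_events = 0;
	} else {
		m->events++;		/* otherwise always move forward */
		m->can_decrease_events = nospares;
	}
}

int main(void)
{
	struct model_mddev m = { .events = 100, .can_decrease_events = 0, .in_sync = 1 };

	model_update_events(&m, 1);	/* dirty:  100 -> 101, rollback now allowed */
	model_update_events(&m, 1);	/* clean:  101 -> 100, rolled back */
	printf("events=%llu\n", m.events);	/* prints events=100 */
	return 0;
}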
  2264. /* words written to sysfs files may, or may not, be \n terminated.
2265. * We want to accept either case. For this we use cmd_match.
  2266. */
  2267. static int cmd_match(const char *cmd, const char *str)
  2268. {
  2269. /* See if cmd, written into a sysfs file, matches
  2270. * str. They must either be the same, or cmd can
  2271. * have a trailing newline
  2272. */
  2273. while (*cmd && *str && *cmd == *str) {
  2274. cmd++;
  2275. str++;
  2276. }
  2277. if (*cmd == '\n')
  2278. cmd++;
  2279. if (*str || *cmd)
  2280. return 0;
  2281. return 1;
  2282. }
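/*
 * Illustrative check, not part of md.c: cmd_match() accepts an exact match
 * with or without one trailing newline, which is exactly what
 * `echo check > sync_action` produces.
 */
#include <assert.h>

static void cmd_match_demo(void)
{
	assert(cmd_match("check\n", "check") == 1);	/* echo appends '\n' */
	assert(cmd_match("check", "check") == 1);	/* echo -n form */
	assert(cmd_match("checkpoint", "check") == 0);	/* a prefix is not a match */
}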
  2283. struct rdev_sysfs_entry {
  2284. struct attribute attr;
  2285. ssize_t (*show)(struct md_rdev *, char *);
  2286. ssize_t (*store)(struct md_rdev *, const char *, size_t);
  2287. };
  2288. static ssize_t
  2289. state_show(struct md_rdev *rdev, char *page)
  2290. {
  2291. char *sep = "";
  2292. size_t len = 0;
  2293. if (test_bit(Faulty, &rdev->flags) ||
  2294. rdev->badblocks.unacked_exist) {
  2295. len+= sprintf(page+len, "%sfaulty",sep);
  2296. sep = ",";
  2297. }
  2298. if (test_bit(In_sync, &rdev->flags)) {
  2299. len += sprintf(page+len, "%sin_sync",sep);
  2300. sep = ",";
  2301. }
  2302. if (test_bit(WriteMostly, &rdev->flags)) {
  2303. len += sprintf(page+len, "%swrite_mostly",sep);
  2304. sep = ",";
  2305. }
  2306. if (test_bit(Blocked, &rdev->flags) ||
  2307. (rdev->badblocks.unacked_exist
  2308. && !test_bit(Faulty, &rdev->flags))) {
  2309. len += sprintf(page+len, "%sblocked", sep);
  2310. sep = ",";
  2311. }
  2312. if (!test_bit(Faulty, &rdev->flags) &&
  2313. !test_bit(In_sync, &rdev->flags)) {
  2314. len += sprintf(page+len, "%sspare", sep);
  2315. sep = ",";
  2316. }
  2317. if (test_bit(WriteErrorSeen, &rdev->flags)) {
  2318. len += sprintf(page+len, "%swrite_error", sep);
  2319. sep = ",";
  2320. }
  2321. if (test_bit(WantReplacement, &rdev->flags)) {
  2322. len += sprintf(page+len, "%swant_replacement", sep);
  2323. sep = ",";
  2324. }
  2325. if (test_bit(Replacement, &rdev->flags)) {
  2326. len += sprintf(page+len, "%sreplacement", sep);
  2327. sep = ",";
  2328. }
  2329. return len+sprintf(page+len, "\n");
  2330. }
  2331. static ssize_t
  2332. state_store(struct md_rdev *rdev, const char *buf, size_t len)
  2333. {
  2334. /* can write
  2335. * faulty - simulates an error
  2336. * remove - disconnects the device
  2337. * writemostly - sets write_mostly
  2338. * -writemostly - clears write_mostly
2339. * blocked - sets the Blocked flag
2340. * -blocked - clears the Blocked flag and possibly simulates an error
2341. * insync - sets In_sync, provided the device isn't active
  2342. * write_error - sets WriteErrorSeen
  2343. * -write_error - clears WriteErrorSeen
  2344. */
  2345. int err = -EINVAL;
  2346. if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
  2347. md_error(rdev->mddev, rdev);
  2348. if (test_bit(Faulty, &rdev->flags))
  2349. err = 0;
  2350. else
  2351. err = -EBUSY;
  2352. } else if (cmd_match(buf, "remove")) {
  2353. if (rdev->raid_disk >= 0)
  2354. err = -EBUSY;
  2355. else {
  2356. struct mddev *mddev = rdev->mddev;
  2357. kick_rdev_from_array(rdev);
  2358. if (mddev->pers)
  2359. md_update_sb(mddev, 1);
  2360. md_new_event(mddev);
  2361. err = 0;
  2362. }
  2363. } else if (cmd_match(buf, "writemostly")) {
  2364. set_bit(WriteMostly, &rdev->flags);
  2365. err = 0;
  2366. } else if (cmd_match(buf, "-writemostly")) {
  2367. clear_bit(WriteMostly, &rdev->flags);
  2368. err = 0;
  2369. } else if (cmd_match(buf, "blocked")) {
  2370. set_bit(Blocked, &rdev->flags);
  2371. err = 0;
  2372. } else if (cmd_match(buf, "-blocked")) {
  2373. if (!test_bit(Faulty, &rdev->flags) &&
  2374. rdev->badblocks.unacked_exist) {
  2375. /* metadata handler doesn't understand badblocks,
  2376. * so we need to fail the device
  2377. */
  2378. md_error(rdev->mddev, rdev);
  2379. }
  2380. clear_bit(Blocked, &rdev->flags);
  2381. clear_bit(BlockedBadBlocks, &rdev->flags);
  2382. wake_up(&rdev->blocked_wait);
  2383. set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
  2384. md_wakeup_thread(rdev->mddev->thread);
  2385. err = 0;
  2386. } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
  2387. set_bit(In_sync, &rdev->flags);
  2388. err = 0;
  2389. } else if (cmd_match(buf, "write_error")) {
  2390. set_bit(WriteErrorSeen, &rdev->flags);
  2391. err = 0;
  2392. } else if (cmd_match(buf, "-write_error")) {
  2393. clear_bit(WriteErrorSeen, &rdev->flags);
  2394. err = 0;
  2395. } else if (cmd_match(buf, "want_replacement")) {
  2396. /* Any non-spare device that is not a replacement can
  2397. * become want_replacement at any time, but we then need to
  2398. * check if recovery is needed.
  2399. */
  2400. if (rdev->raid_disk >= 0 &&
  2401. !test_bit(Replacement, &rdev->flags))
  2402. set_bit(WantReplacement, &rdev->flags);
  2403. set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
  2404. md_wakeup_thread(rdev->mddev->thread);
  2405. err = 0;
  2406. } else if (cmd_match(buf, "-want_replacement")) {
  2407. /* Clearing 'want_replacement' is always allowed.
2408. * Once replacement starts it is too late, though.
  2409. */
  2410. err = 0;
  2411. clear_bit(WantReplacement, &rdev->flags);
  2412. } else if (cmd_match(buf, "replacement")) {
  2413. /* Can only set a device as a replacement when array has not
  2414. * yet been started. Once running, replacement is automatic
  2415. * from spares, or by assigning 'slot'.
  2416. */
  2417. if (rdev->mddev->pers)
  2418. err = -EBUSY;
  2419. else {
  2420. set_bit(Replacement, &rdev->flags);
  2421. err = 0;
  2422. }
  2423. } else if (cmd_match(buf, "-replacement")) {
  2424. /* Similarly, can only clear Replacement before start */
  2425. if (rdev->mddev->pers)
  2426. err = -EBUSY;
  2427. else {
  2428. clear_bit(Replacement, &rdev->flags);
  2429. err = 0;
  2430. }
  2431. }
  2432. if (!err)
  2433. sysfs_notify_dirent_safe(rdev->sysfs_state);
  2434. return err ? err : len;
  2435. }
  2436. static struct rdev_sysfs_entry rdev_state =
  2437. __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
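/*
 * Illustrative userspace sketch, not part of md.c: driving state_store()
 * through sysfs. The path follows the standard md layout
 * /sys/block/<array>/md/dev-<member>/state; substitute real device names.
 */
#include <stdio.h>

static int write_rdev_state(const char *path, const char *word)
{
	FILE *f = fopen(path, "w");
	int n;

	if (!f)
		return -1;
	n = fprintf(f, "%s\n", word);	/* trailing '\n' is fine: cmd_match() strips it */
	if (fclose(f) != 0 || n < 0)
		return -1;
	return 0;
}

/* e.g. write_rdev_state("/sys/block/md0/md/dev-sda1/state", "-blocked"); */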
  2438. static ssize_t
  2439. errors_show(struct md_rdev *rdev, char *page)
  2440. {
  2441. return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
  2442. }
  2443. static ssize_t
  2444. errors_store(struct md_rdev *rdev, const char *buf, size_t len)
  2445. {
  2446. char *e;
  2447. unsigned long n = simple_strtoul(buf, &e, 10);
  2448. if (*buf && (*e == 0 || *e == '\n')) {
  2449. atomic_set(&rdev->corrected_errors, n);
  2450. return len;
  2451. }
  2452. return -EINVAL;
  2453. }
  2454. static struct rdev_sysfs_entry rdev_errors =
  2455. __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
  2456. static ssize_t
  2457. slot_show(struct md_rdev *rdev, char *page)
  2458. {
  2459. if (rdev->raid_disk < 0)
  2460. return sprintf(page, "none\n");
  2461. else
  2462. return sprintf(page, "%d\n", rdev->raid_disk);
  2463. }
  2464. static ssize_t
  2465. slot_store(struct md_rdev *rdev, const char *buf, size_t len)
  2466. {
  2467. char *e;
  2468. int err;
  2469. int slot = simple_strtoul(buf, &e, 10);
  2470. if (strncmp(buf, "none", 4)==0)
  2471. slot = -1;
  2472. else if (e==buf || (*e && *e!= '\n'))
  2473. return -EINVAL;
  2474. if (rdev->mddev->pers && slot == -1) {
  2475. /* Setting 'slot' on an active array requires also
  2476. * updating the 'rd%d' link, and communicating
  2477. * with the personality with ->hot_*_disk.
  2478. * For now we only support removing
  2479. * failed/spare devices. This normally happens automatically,
  2480. * but not when the metadata is externally managed.
  2481. */
  2482. if (rdev->raid_disk == -1)
  2483. return -EEXIST;
  2484. /* personality does all needed checks */
  2485. if (rdev->mddev->pers->hot_remove_disk == NULL)
  2486. return -EINVAL;
  2487. clear_bit(Blocked, &rdev->flags);
  2488. remove_and_add_spares(rdev->mddev, rdev);
  2489. if (rdev->raid_disk >= 0)
  2490. return -EBUSY;
  2491. set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
  2492. md_wakeup_thread(rdev->mddev->thread);
  2493. } else if (rdev->mddev->pers) {
  2494. /* Activating a spare .. or possibly reactivating
  2495. * if we ever get bitmaps working here.
  2496. */
  2497. if (rdev->raid_disk != -1)
  2498. return -EBUSY;
  2499. if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
  2500. return -EBUSY;
  2501. if (rdev->mddev->pers->hot_add_disk == NULL)
  2502. return -EINVAL;
  2503. if (slot >= rdev->mddev->raid_disks &&
  2504. slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
  2505. return -ENOSPC;
  2506. rdev->raid_disk = slot;
  2507. if (test_bit(In_sync, &rdev->flags))
  2508. rdev->saved_raid_disk = slot;
  2509. else
  2510. rdev->saved_raid_disk = -1;
  2511. clear_bit(In_sync, &rdev->flags);
  2512. err = rdev->mddev->pers->
  2513. hot_add_disk(rdev->mddev, rdev);
  2514. if (err) {
  2515. rdev->raid_disk = -1;
  2516. return err;
  2517. } else
  2518. sysfs_notify_dirent_safe(rdev->sysfs_state);
  2519. if (sysfs_link_rdev(rdev->mddev, rdev))
  2520. /* failure here is OK */;
2521. /* don't wake up anyone, leave that to userspace. */
  2522. } else {
  2523. if (slot >= rdev->mddev->raid_disks &&
  2524. slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
  2525. return -ENOSPC;
  2526. rdev->raid_disk = slot;
  2527. /* assume it is working */
  2528. clear_bit(Faulty, &rdev->flags);
  2529. clear_bit(WriteMostly, &rdev->flags);
  2530. set_bit(In_sync, &rdev->flags);
  2531. sysfs_notify_dirent_safe(rdev->sysfs_state);
  2532. }
  2533. return len;
  2534. }
  2535. static struct rdev_sysfs_entry rdev_slot =
  2536. __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
  2537. static ssize_t
  2538. offset_show(struct md_rdev *rdev, char *page)
  2539. {
  2540. return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
  2541. }
  2542. static ssize_t
  2543. offset_store(struct md_rdev *rdev, const char *buf, size_t len)
  2544. {
  2545. unsigned long long offset;
  2546. if (kstrtoull(buf, 10, &offset) < 0)
  2547. return -EINVAL;
  2548. if (rdev->mddev->pers && rdev->raid_disk >= 0)
  2549. return -EBUSY;
  2550. if (rdev->sectors && rdev->mddev->external)
  2551. /* Must set offset before size, so overlap checks
  2552. * can be sane */
  2553. return -EBUSY;
  2554. rdev->data_offset = offset;
  2555. rdev->new_data_offset = offset;
  2556. return len;
  2557. }
  2558. static struct rdev_sysfs_entry rdev_offset =
  2559. __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
  2560. static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
  2561. {
  2562. return sprintf(page, "%llu\n",
  2563. (unsigned long long)rdev->new_data_offset);
  2564. }
  2565. static ssize_t new_offset_store(struct md_rdev *rdev,
  2566. const char *buf, size_t len)
  2567. {
  2568. unsigned long long new_offset;
  2569. struct mddev *mddev = rdev->mddev;
  2570. if (kstrtoull(buf, 10, &new_offset) < 0)
  2571. return -EINVAL;
  2572. if (mddev->sync_thread)
  2573. return -EBUSY;
  2574. if (new_offset == rdev->data_offset)
  2575. /* reset is always permitted */
  2576. ;
  2577. else if (new_offset > rdev->data_offset) {
  2578. /* must not push array size beyond rdev_sectors */
  2579. if (new_offset - rdev->data_offset
  2580. + mddev->dev_sectors > rdev->sectors)
  2581. return -E2BIG;
  2582. }
  2583. /* Metadata worries about other space details. */
  2584. /* decreasing the offset is inconsistent with a backwards
  2585. * reshape.
  2586. */
  2587. if (new_offset < rdev->data_offset &&
  2588. mddev->reshape_backwards)
  2589. return -EINVAL;
  2590. /* Increasing offset is inconsistent with forwards
  2591. * reshape. reshape_direction should be set to
  2592. * 'backwards' first.
  2593. */
  2594. if (new_offset > rdev->data_offset &&
  2595. !mddev->reshape_backwards)
  2596. return -EINVAL;
  2597. if (mddev->pers && mddev->persistent &&
  2598. !super_types[mddev->major_version]
  2599. .allow_new_offset(rdev, new_offset))
  2600. return -E2BIG;
  2601. rdev->new_data_offset = new_offset;
  2602. if (new_offset > rdev->data_offset)
  2603. mddev->reshape_backwards = 1;
  2604. else if (new_offset < rdev->data_offset)
  2605. mddev->reshape_backwards = 0;
  2606. return len;
  2607. }
  2608. static struct rdev_sysfs_entry rdev_new_offset =
  2609. __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
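/*
 * Illustrative sketch, not part of md.c: the direction rules that
 * new_offset_store() enforces. reshape_direction must already agree with
 * the requested move; the demo name is hypothetical.
 */
static int new_offset_direction_demo(unsigned long long data_offset,
				     unsigned long long new_offset,
				     int reshape_backwards)
{
	if (new_offset == data_offset)
		return 0;	/* resetting to the current offset is always permitted */
	if (new_offset < data_offset && reshape_backwards)
		return -1;	/* a lower offset is inconsistent with a backwards reshape */
	if (new_offset > data_offset && !reshape_backwards)
		return -1;	/* a higher offset needs reshape_direction=backwards first */
	return 0;
}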
  2610. static ssize_t
  2611. rdev_size_show(struct md_rdev *rdev, char *page)
  2612. {
  2613. return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
  2614. }
  2615. static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
  2616. {
  2617. /* check if two start/length pairs overlap */
  2618. if (s1+l1 <= s2)
  2619. return 0;
  2620. if (s2+l2 <= s1)
  2621. return 0;
  2622. return 1;
  2623. }
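/*
 * Illustrative check, not part of md.c: overlaps() treats each pair as a
 * half-open range [start, start+len), so ranges that merely touch do not
 * count as overlapping.
 */
#include <assert.h>

static void overlaps_demo(void)
{
	assert(overlaps(0, 100, 100, 50) == 0);	/* adjacent: no overlap */
	assert(overlaps(0, 100, 99, 50) == 1);	/* one sector shared */
	assert(overlaps(50, 10, 0, 100) == 1);	/* containment overlaps too */
}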
  2624. static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
  2625. {
  2626. unsigned long long blocks;
  2627. sector_t new;
  2628. if (kstrtoull(buf, 10, &blocks) < 0)
  2629. return -EINVAL;
  2630. if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
  2631. return -EINVAL; /* sector conversion overflow */
  2632. new = blocks * 2;
  2633. if (new != blocks * 2)
  2634. return -EINVAL; /* unsigned long long to sector_t overflow */
  2635. *sectors = new;
  2636. return 0;
  2637. }
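/*
 * Illustrative userspace sketch, not part of md.c: the same two overflow
 * guards strict_blocks_to_sectors() applies, shown with stdint types and a
 * deliberately 32-bit "sector" so the truncation test is meaningful.
 */
#include <stdint.h>

static int blocks_to_sectors_demo(uint64_t blocks, uint32_t *sectors)
{
	uint32_t new;

	if (blocks & (1ULL << 63))
		return -1;	/* doubling would wrap the 64-bit block count */
	new = (uint32_t)(blocks * 2);
	if (new != blocks * 2)
		return -1;	/* result does not fit the narrower sector type */
	*sectors = new;
	return 0;
}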
  2638. static ssize_t
  2639. rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
  2640. {
  2641. struct mddev *my_mddev = rdev->mddev;
  2642. sector_t oldsectors = rdev->sectors;
  2643. sector_t sectors;
  2644. if (strict_blocks_to_sectors(buf, &sectors) < 0)
  2645. return -EINVAL;
  2646. if (rdev->data_offset != rdev->new_data_offset)
  2647. return -EINVAL; /* too confusing */
  2648. if (my_mddev->pers && rdev->raid_disk >= 0) {
  2649. if (my_mddev->persistent) {
  2650. sectors = super_types[my_mddev->major_version].
  2651. rdev_size_change(rdev, sectors);
  2652. if (!sectors)
  2653. return -EBUSY;
  2654. } else if (!sectors)
  2655. sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
  2656. rdev->data_offset;
  2657. if (!my_mddev->pers->resize)
  2658. /* Cannot change size for RAID0 or Linear etc */
  2659. return -EINVAL;
  2660. }
  2661. if (sectors < my_mddev->dev_sectors)
  2662. return -EINVAL; /* component must fit device */
  2663. rdev->sectors = sectors;
  2664. if (sectors > oldsectors && my_mddev->external) {
  2665. /* need to check that all other rdevs with the same ->bdev
  2666. * do not overlap. We need to unlock the mddev to avoid
  2667. * a deadlock. We have already changed rdev->sectors, and if
  2668. * we have to change it back, we will have the lock again.
  2669. */
  2670. struct mddev *mddev;
  2671. int overlap = 0;
  2672. struct list_head *tmp;
  2673. mddev_unlock(my_mddev);
  2674. for_each_mddev(mddev, tmp) {
  2675. struct md_rdev *rdev2;
  2676. mddev_lock(mddev);
  2677. rdev_for_each(rdev2, mddev)
  2678. if (rdev->bdev == rdev2->bdev &&
  2679. rdev != rdev2 &&
  2680. overlaps(rdev->data_offset, rdev->sectors,
  2681. rdev2->data_offset,
  2682. rdev2->sectors)) {
  2683. overlap = 1;
  2684. break;
  2685. }
  2686. mddev_unlock(mddev);
  2687. if (overlap) {
  2688. mddev_put(mddev);
  2689. break;
  2690. }
  2691. }
  2692. mddev_lock(my_mddev);
  2693. if (overlap) {
  2694. /* Someone else could have slipped in a size
  2695. * change here, but doing so is just silly.
  2696. * We put oldsectors back because we *know* it is
  2697. * safe, and trust userspace not to race with
  2698. * itself
  2699. */
  2700. rdev->sectors = oldsectors;
  2701. return -EBUSY;
  2702. }
  2703. }
  2704. return len;
  2705. }
  2706. static struct rdev_sysfs_entry rdev_size =
  2707. __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
  2708. static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
  2709. {
  2710. unsigned long long recovery_start = rdev->recovery_offset;
  2711. if (test_bit(In_sync, &rdev->flags) ||
  2712. recovery_start == MaxSector)
  2713. return sprintf(page, "none\n");
  2714. return sprintf(page, "%llu\n", recovery_start);
  2715. }
  2716. static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
  2717. {
  2718. unsigned long long recovery_start;
  2719. if (cmd_match(buf, "none"))
  2720. recovery_start = MaxSector;
  2721. else if (kstrtoull(buf, 10, &recovery_start))
  2722. return -EINVAL;
  2723. if (rdev->mddev->pers &&
  2724. rdev->raid_disk >= 0)
  2725. return -EBUSY;
  2726. rdev->recovery_offset = recovery_start;
  2727. if (recovery_start == MaxSector)
  2728. set_bit(In_sync, &rdev->flags);
  2729. else
  2730. clear_bit(In_sync, &rdev->flags);
  2731. return len;
  2732. }
  2733. static struct rdev_sysfs_entry rdev_recovery_start =
  2734. __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
  2735. static ssize_t
  2736. badblocks_show(struct badblocks *bb, char *page, int unack);
  2737. static ssize_t
  2738. badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);
  2739. static ssize_t bb_show(struct md_rdev *rdev, char *page)
  2740. {
  2741. return badblocks_show(&rdev->badblocks, page, 0);
  2742. }
  2743. static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
  2744. {
  2745. int rv = badblocks_store(&rdev->badblocks, page, len, 0);
  2746. /* Maybe that ack was all we needed */
  2747. if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
  2748. wake_up(&rdev->blocked_wait);
  2749. return rv;
  2750. }
  2751. static struct rdev_sysfs_entry rdev_bad_blocks =
  2752. __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
  2753. static ssize_t ubb_show(struct md_rdev *rdev, char *page)
  2754. {
  2755. return badblocks_show(&rdev->badblocks, page, 1);
  2756. }
  2757. static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
  2758. {
  2759. return badblocks_store(&rdev->badblocks, page, len, 1);
  2760. }
  2761. static struct rdev_sysfs_entry rdev_unack_bad_blocks =
  2762. __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
  2763. static struct attribute *rdev_default_attrs[] = {
  2764. &rdev_state.attr,
  2765. &rdev_errors.attr,
  2766. &rdev_slot.attr,
  2767. &rdev_offset.attr,
  2768. &rdev_new_offset.attr,
  2769. &rdev_size.attr,
  2770. &rdev_recovery_start.attr,
  2771. &rdev_bad_blocks.attr,
  2772. &rdev_unack_bad_blocks.attr,
  2773. NULL,
  2774. };
  2775. static ssize_t
  2776. rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
  2777. {
  2778. struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
  2779. struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
  2780. struct mddev *mddev = rdev->mddev;
  2781. ssize_t rv;
  2782. if (!entry->show)
  2783. return -EIO;
  2784. rv = mddev ? mddev_lock(mddev) : -EBUSY;
  2785. if (!rv) {
  2786. if (rdev->mddev == NULL)
  2787. rv = -EBUSY;
  2788. else
  2789. rv = entry->show(rdev, page);
  2790. mddev_unlock(mddev);
  2791. }
  2792. return rv;
  2793. }
  2794. static ssize_t
  2795. rdev_attr_store(struct kobject *kobj, struct attribute *attr,
  2796. const char *page, size_t length)
  2797. {
  2798. struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
  2799. struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
  2800. ssize_t rv;
  2801. struct mddev *mddev = rdev->mddev;
  2802. if (!entry->store)
  2803. return -EIO;
  2804. if (!capable(CAP_SYS_ADMIN))
  2805. return -EACCES;
  2806. rv = mddev ? mddev_lock(mddev): -EBUSY;
  2807. if (!rv) {
  2808. if (rdev->mddev == NULL)
  2809. rv = -EBUSY;
  2810. else
  2811. rv = entry->store(rdev, page, length);
  2812. mddev_unlock(mddev);
  2813. }
  2814. return rv;
  2815. }
  2816. static void rdev_free(struct kobject *ko)
  2817. {
  2818. struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
  2819. kfree(rdev);
  2820. }
  2821. static const struct sysfs_ops rdev_sysfs_ops = {
  2822. .show = rdev_attr_show,
  2823. .store = rdev_attr_store,
  2824. };
  2825. static struct kobj_type rdev_ktype = {
  2826. .release = rdev_free,
  2827. .sysfs_ops = &rdev_sysfs_ops,
  2828. .default_attrs = rdev_default_attrs,
  2829. };
  2830. int md_rdev_init(struct md_rdev *rdev)
  2831. {
  2832. rdev->desc_nr = -1;
  2833. rdev->saved_raid_disk = -1;
  2834. rdev->raid_disk = -1;
  2835. rdev->flags = 0;
  2836. rdev->data_offset = 0;
  2837. rdev->new_data_offset = 0;
  2838. rdev->sb_events = 0;
  2839. rdev->last_read_error.tv_sec = 0;
  2840. rdev->last_read_error.tv_nsec = 0;
  2841. rdev->sb_loaded = 0;
  2842. rdev->bb_page = NULL;
  2843. atomic_set(&rdev->nr_pending, 0);
  2844. atomic_set(&rdev->read_errors, 0);
  2845. atomic_set(&rdev->corrected_errors, 0);
  2846. INIT_LIST_HEAD(&rdev->same_set);
  2847. init_waitqueue_head(&rdev->blocked_wait);
  2848. /* Add space to store bad block list.
  2849. * This reserves the space even on arrays where it cannot
  2850. * be used - I wonder if that matters
  2851. */
  2852. rdev->badblocks.count = 0;
  2853. rdev->badblocks.shift = -1; /* disabled until explicitly enabled */
  2854. rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
  2855. seqlock_init(&rdev->badblocks.lock);
  2856. if (rdev->badblocks.page == NULL)
  2857. return -ENOMEM;
  2858. return 0;
  2859. }
  2860. EXPORT_SYMBOL_GPL(md_rdev_init);
  2861. /*
  2862. * Import a device. If 'super_format' >= 0, then sanity check the superblock
  2863. *
  2864. * mark the device faulty if:
  2865. *
  2866. * - the device is nonexistent (zero size)
  2867. * - the device has no valid superblock
  2868. *
  2869. * a faulty rdev _never_ has rdev->sb set.
  2870. */
  2871. static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
  2872. {
  2873. char b[BDEVNAME_SIZE];
  2874. int err;
  2875. struct md_rdev *rdev;
  2876. sector_t size;
  2877. rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
  2878. if (!rdev) {
  2879. printk(KERN_ERR "md: could not alloc mem for new device!\n");
  2880. return ERR_PTR(-ENOMEM);
  2881. }
  2882. err = md_rdev_init(rdev);
  2883. if (err)
  2884. goto abort_free;
  2885. err = alloc_disk_sb(rdev);
  2886. if (err)
  2887. goto abort_free;
  2888. err = lock_rdev(rdev, newdev, super_format == -2);
  2889. if (err)
  2890. goto abort_free;
  2891. kobject_init(&rdev->kobj, &rdev_ktype);
  2892. size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
  2893. if (!size) {
  2894. printk(KERN_WARNING
  2895. "md: %s has zero or unknown size, marking faulty!\n",
  2896. bdevname(rdev->bdev,b));
  2897. err = -EINVAL;
  2898. goto abort_free;
  2899. }
  2900. if (super_format >= 0) {
  2901. err = super_types[super_format].
  2902. load_super(rdev, NULL, super_minor);
  2903. if (err == -EINVAL) {
  2904. printk(KERN_WARNING
  2905. "md: %s does not have a valid v%d.%d "
  2906. "superblock, not importing!\n",
  2907. bdevname(rdev->bdev,b),
  2908. super_format, super_minor);
  2909. goto abort_free;
  2910. }
  2911. if (err < 0) {
  2912. printk(KERN_WARNING
  2913. "md: could not read %s's sb, not importing!\n",
  2914. bdevname(rdev->bdev,b));
  2915. goto abort_free;
  2916. }
  2917. }
  2918. return rdev;
  2919. abort_free:
  2920. if (rdev->bdev)
  2921. unlock_rdev(rdev);
  2922. md_rdev_clear(rdev);
  2923. kfree(rdev);
  2924. return ERR_PTR(err);
  2925. }
  2926. /*
  2927. * Check a full RAID array for plausibility
  2928. */
  2929. static void analyze_sbs(struct mddev * mddev)
  2930. {
  2931. int i;
  2932. struct md_rdev *rdev, *freshest, *tmp;
  2933. char b[BDEVNAME_SIZE];
  2934. freshest = NULL;
  2935. rdev_for_each_safe(rdev, tmp, mddev)
  2936. switch (super_types[mddev->major_version].
  2937. load_super(rdev, freshest, mddev->minor_version)) {
  2938. case 1:
  2939. freshest = rdev;
  2940. break;
  2941. case 0:
  2942. break;
  2943. default:
  2944. printk( KERN_ERR \
  2945. "md: fatal superblock inconsistency in %s"
  2946. " -- removing from array\n",
  2947. bdevname(rdev->bdev,b));
  2948. kick_rdev_from_array(rdev);
  2949. }
  2950. super_types[mddev->major_version].
  2951. validate_super(mddev, freshest);
  2952. i = 0;
  2953. rdev_for_each_safe(rdev, tmp, mddev) {
  2954. if (mddev->max_disks &&
  2955. (rdev->desc_nr >= mddev->max_disks ||
  2956. i > mddev->max_disks)) {
  2957. printk(KERN_WARNING
  2958. "md: %s: %s: only %d devices permitted\n",
  2959. mdname(mddev), bdevname(rdev->bdev, b),
  2960. mddev->max_disks);
  2961. kick_rdev_from_array(rdev);
  2962. continue;
  2963. }
  2964. if (rdev != freshest)
  2965. if (super_types[mddev->major_version].
  2966. validate_super(mddev, rdev)) {
  2967. printk(KERN_WARNING "md: kicking non-fresh %s"
  2968. " from array!\n",
  2969. bdevname(rdev->bdev,b));
  2970. kick_rdev_from_array(rdev);
  2971. continue;
  2972. }
  2973. if (mddev->level == LEVEL_MULTIPATH) {
  2974. rdev->desc_nr = i++;
  2975. rdev->raid_disk = rdev->desc_nr;
  2976. set_bit(In_sync, &rdev->flags);
  2977. } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
  2978. rdev->raid_disk = -1;
  2979. clear_bit(In_sync, &rdev->flags);
  2980. }
  2981. }
  2982. }
  2983. /* Read a fixed-point number.
  2984. * Numbers in sysfs attributes should be in "standard" units where
  2985. * possible, so time should be in seconds.
2986. * However we internally use a much smaller unit such as
  2987. * milliseconds or jiffies.
  2988. * This function takes a decimal number with a possible fractional
  2989. * component, and produces an integer which is the result of
  2990. * multiplying that number by 10^'scale'.
  2991. * all without any floating-point arithmetic.
  2992. */
  2993. int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
  2994. {
  2995. unsigned long result = 0;
  2996. long decimals = -1;
  2997. while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
  2998. if (*cp == '.')
  2999. decimals = 0;
  3000. else if (decimals < scale) {
  3001. unsigned int value;
  3002. value = *cp - '0';
  3003. result = result * 10 + value;
  3004. if (decimals >= 0)
  3005. decimals++;
  3006. }
  3007. cp++;
  3008. }
  3009. if (*cp == '\n')
  3010. cp++;
  3011. if (*cp)
  3012. return -EINVAL;
  3013. if (decimals < 0)
  3014. decimals = 0;
  3015. while (decimals < scale) {
  3016. result *= 10;
  3017. decimals ++;
  3018. }
  3019. *res = result;
  3020. return 0;
  3021. }
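/*
 * Illustrative check, not part of md.c: strict_strtoul_scaled() is a
 * fixed-point parse, result = input * 10^scale, with at most one decimal
 * point allowed. safe_delay_store() below uses scale=3 to turn seconds
 * into milliseconds.
 */
#include <assert.h>

static void scaled_parse_demo(void)
{
	unsigned long v;

	assert(strict_strtoul_scaled("12.34", &v, 3) == 0 && v == 12340);
	assert(strict_strtoul_scaled("0.75\n", &v, 3) == 0 && v == 750); /* trailing \n ok */
	assert(strict_strtoul_scaled("1.2.3", &v, 3) != 0);	/* second '.' rejected */
}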
  3022. static void md_safemode_timeout(unsigned long data);
  3023. static ssize_t
  3024. safe_delay_show(struct mddev *mddev, char *page)
  3025. {
  3026. int msec = (mddev->safemode_delay*1000)/HZ;
  3027. return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
  3028. }
  3029. static ssize_t
  3030. safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
  3031. {
  3032. unsigned long msec;
  3033. if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
  3034. return -EINVAL;
  3035. if (msec == 0)
  3036. mddev->safemode_delay = 0;
  3037. else {
  3038. unsigned long old_delay = mddev->safemode_delay;
  3039. mddev->safemode_delay = (msec*HZ)/1000;
  3040. if (mddev->safemode_delay == 0)
  3041. mddev->safemode_delay = 1;
  3042. if (mddev->safemode_delay < old_delay || old_delay == 0)
  3043. md_safemode_timeout((unsigned long)mddev);
  3044. }
  3045. return len;
  3046. }
  3047. static struct md_sysfs_entry md_safe_delay =
  3048. __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
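/*
 * Illustrative userspace sketch, not part of md.c: the msec-to-jiffies
 * conversion safe_delay_store() performs, with HZ assumed to be 250 (a
 * 4 ms jiffy). Any non-zero delay is clamped up to at least one jiffy.
 */
#include <assert.h>

#define DEMO_HZ 250

static unsigned long demo_msec_to_delay(unsigned long msec)
{
	unsigned long delay = (msec * DEMO_HZ) / 1000;

	if (msec && delay == 0)
		delay = 1;	/* 1..3 ms all round up to one 4 ms jiffy */
	return delay;
}

static void safe_delay_demo(void)
{
	assert(demo_msec_to_delay(200) == 50);	/* "0.200" -> 50 jiffies */
	assert(demo_msec_to_delay(2) == 1);	/* too short: clamped to 1 */
	assert(demo_msec_to_delay(0) == 0);	/* 0 disables the safemode delay */
}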
  3049. static ssize_t
  3050. level_show(struct mddev *mddev, char *page)
  3051. {
  3052. struct md_personality *p = mddev->pers;
  3053. if (p)
  3054. return sprintf(page, "%s\n", p->name);
  3055. else if (mddev->clevel[0])
  3056. return sprintf(page, "%s\n", mddev->clevel);
  3057. else if (mddev->level != LEVEL_NONE)
  3058. return sprintf(page, "%d\n", mddev->level);
  3059. else
  3060. return 0;
  3061. }
  3062. static ssize_t
  3063. level_store(struct mddev *mddev, const char *buf, size_t len)
  3064. {
  3065. char clevel[16];
  3066. ssize_t rv = len;
  3067. struct md_personality *pers;
  3068. long level;
  3069. void *priv;
  3070. struct md_rdev *rdev;
  3071. if (mddev->pers == NULL) {
  3072. if (len == 0)
  3073. return 0;
  3074. if (len >= sizeof(mddev->clevel))
  3075. return -ENOSPC;
  3076. strncpy(mddev->clevel, buf, len);
  3077. if (mddev->clevel[len-1] == '\n')
  3078. len--;
  3079. mddev->clevel[len] = 0;
  3080. mddev->level = LEVEL_NONE;
  3081. return rv;
  3082. }
  3083. /* request to change the personality. Need to ensure:
  3084. * - array is not engaged in resync/recovery/reshape
  3085. * - old personality can be suspended
3086. * - new personality will accept the array (via ->takeover).
  3087. */
  3088. if (mddev->sync_thread ||
  3089. mddev->reshape_position != MaxSector ||
  3090. mddev->sysfs_active)
  3091. return -EBUSY;
  3092. if (!mddev->pers->quiesce) {
  3093. printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
  3094. mdname(mddev), mddev->pers->name);
  3095. return -EINVAL;
  3096. }
  3097. /* Now find the new personality */
  3098. if (len == 0 || len >= sizeof(clevel))
  3099. return -EINVAL;
  3100. strncpy(clevel, buf, len);
  3101. if (clevel[len-1] == '\n')
  3102. len--;
  3103. clevel[len] = 0;
  3104. if (kstrtol(clevel, 10, &level))
  3105. level = LEVEL_NONE;
  3106. if (request_module("md-%s", clevel) != 0)
  3107. request_module("md-level-%s", clevel);
  3108. spin_lock(&pers_lock);
  3109. pers = find_pers(level, clevel);
  3110. if (!pers || !try_module_get(pers->owner)) {
  3111. spin_unlock(&pers_lock);
  3112. printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
  3113. return -EINVAL;
  3114. }
  3115. spin_unlock(&pers_lock);
  3116. if (pers == mddev->pers) {
  3117. /* Nothing to do! */
  3118. module_put(pers->owner);
  3119. return rv;
  3120. }
  3121. if (!pers->takeover) {
  3122. module_put(pers->owner);
  3123. printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
  3124. mdname(mddev), clevel);
  3125. return -EINVAL;
  3126. }
  3127. rdev_for_each(rdev, mddev)
  3128. rdev->new_raid_disk = rdev->raid_disk;
  3129. /* ->takeover must set new_* and/or delta_disks
  3130. * if it succeeds, and may set them when it fails.
  3131. */
  3132. priv = pers->takeover(mddev);
  3133. if (IS_ERR(priv)) {
  3134. mddev->new_level = mddev->level;
  3135. mddev->new_layout = mddev->layout;
  3136. mddev->new_chunk_sectors = mddev->chunk_sectors;
  3137. mddev->raid_disks -= mddev->delta_disks;
  3138. mddev->delta_disks = 0;
  3139. mddev->reshape_backwards = 0;
  3140. module_put(pers->owner);
  3141. printk(KERN_WARNING "md: %s: %s would not accept array\n",
  3142. mdname(mddev), clevel);
  3143. return PTR_ERR(priv);
  3144. }
  3145. /* Looks like we have a winner */
  3146. mddev_suspend(mddev);
  3147. mddev->pers->stop(mddev);
  3148. if (mddev->pers->sync_request == NULL &&
  3149. pers->sync_request != NULL) {
  3150. /* need to add the md_redundancy_group */
  3151. if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
  3152. printk(KERN_WARNING
  3153. "md: cannot register extra attributes for %s\n",
  3154. mdname(mddev));
  3155. mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
  3156. }
  3157. if (mddev->pers->sync_request != NULL &&
  3158. pers->sync_request == NULL) {
  3159. /* need to remove the md_redundancy_group */
  3160. if (mddev->to_remove == NULL)
  3161. mddev->to_remove = &md_redundancy_group;
  3162. }
  3163. if (mddev->pers->sync_request == NULL &&
  3164. mddev->external) {
  3165. /* We are converting from a no-redundancy array
  3166. * to a redundancy array and metadata is managed
  3167. * externally so we need to be sure that writes
  3168. * won't block due to a need to transition
  3169. * clean->dirty
  3170. * until external management is started.
  3171. */
  3172. mddev->in_sync = 0;
  3173. mddev->safemode_delay = 0;
  3174. mddev->safemode = 0;
  3175. }
  3176. rdev_for_each(rdev, mddev) {
  3177. if (rdev->raid_disk < 0)
  3178. continue;
  3179. if (rdev->new_raid_disk >= mddev->raid_disks)
  3180. rdev->new_raid_disk = -1;
  3181. if (rdev->new_raid_disk == rdev->raid_disk)
  3182. continue;
  3183. sysfs_unlink_rdev(mddev, rdev);
  3184. }
  3185. rdev_for_each(rdev, mddev) {
  3186. if (rdev->raid_disk < 0)
  3187. continue;
  3188. if (rdev->new_raid_disk == rdev->raid_disk)
  3189. continue;
  3190. rdev->raid_disk = rdev->new_raid_disk;
  3191. if (rdev->raid_disk < 0)
  3192. clear_bit(In_sync, &rdev->flags);
  3193. else {
  3194. if (sysfs_link_rdev(mddev, rdev))
  3195. printk(KERN_WARNING "md: cannot register rd%d"
  3196. " for %s after level change\n",
  3197. rdev->raid_disk, mdname(mddev));
  3198. }
  3199. }
  3200. module_put(mddev->pers->owner);
  3201. mddev->pers = pers;
  3202. mddev->private = priv;
  3203. strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
  3204. mddev->level = mddev->new_level;
  3205. mddev->layout = mddev->new_layout;
  3206. mddev->chunk_sectors = mddev->new_chunk_sectors;
  3207. mddev->delta_disks = 0;
  3208. mddev->reshape_backwards = 0;
  3209. mddev->degraded = 0;
  3210. if (mddev->pers->sync_request == NULL) {
  3211. /* this is now an array without redundancy, so
  3212. * it must always be in_sync
  3213. */
  3214. mddev->in_sync = 1;
  3215. del_timer_sync(&mddev->safemode_timer);
  3216. }
  3217. pers->run(mddev);
  3218. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  3219. mddev_resume(mddev);
  3220. sysfs_notify(&mddev->kobj, NULL, "level");
  3221. md_new_event(mddev);
  3222. return rv;
  3223. }
  3224. static struct md_sysfs_entry md_level =
  3225. __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
  3226. static ssize_t
  3227. layout_show(struct mddev *mddev, char *page)
  3228. {
  3229. /* just a number, not meaningful for all levels */
  3230. if (mddev->reshape_position != MaxSector &&
  3231. mddev->layout != mddev->new_layout)
  3232. return sprintf(page, "%d (%d)\n",
  3233. mddev->new_layout, mddev->layout);
  3234. return sprintf(page, "%d\n", mddev->layout);
  3235. }
  3236. static ssize_t
  3237. layout_store(struct mddev *mddev, const char *buf, size_t len)
  3238. {
  3239. char *e;
  3240. unsigned long n = simple_strtoul(buf, &e, 10);
  3241. if (!*buf || (*e && *e != '\n'))
  3242. return -EINVAL;
  3243. if (mddev->pers) {
  3244. int err;
  3245. if (mddev->pers->check_reshape == NULL)
  3246. return -EBUSY;
  3247. mddev->new_layout = n;
  3248. err = mddev->pers->check_reshape(mddev);
  3249. if (err) {
  3250. mddev->new_layout = mddev->layout;
  3251. return err;
  3252. }
  3253. } else {
  3254. mddev->new_layout = n;
  3255. if (mddev->reshape_position == MaxSector)
  3256. mddev->layout = n;
  3257. }
  3258. return len;
  3259. }
  3260. static struct md_sysfs_entry md_layout =
  3261. __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
  3262. static ssize_t
  3263. raid_disks_show(struct mddev *mddev, char *page)
  3264. {
  3265. if (mddev->raid_disks == 0)
  3266. return 0;
  3267. if (mddev->reshape_position != MaxSector &&
  3268. mddev->delta_disks != 0)
  3269. return sprintf(page, "%d (%d)\n", mddev->raid_disks,
  3270. mddev->raid_disks - mddev->delta_disks);
  3271. return sprintf(page, "%d\n", mddev->raid_disks);
  3272. }
  3273. static int update_raid_disks(struct mddev *mddev, int raid_disks);
  3274. static ssize_t
  3275. raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
  3276. {
  3277. char *e;
  3278. int rv = 0;
  3279. unsigned long n = simple_strtoul(buf, &e, 10);
  3280. if (!*buf || (*e && *e != '\n'))
  3281. return -EINVAL;
  3282. if (mddev->pers)
  3283. rv = update_raid_disks(mddev, n);
  3284. else if (mddev->reshape_position != MaxSector) {
  3285. struct md_rdev *rdev;
  3286. int olddisks = mddev->raid_disks - mddev->delta_disks;
  3287. rdev_for_each(rdev, mddev) {
  3288. if (olddisks < n &&
  3289. rdev->data_offset < rdev->new_data_offset)
  3290. return -EINVAL;
  3291. if (olddisks > n &&
  3292. rdev->data_offset > rdev->new_data_offset)
  3293. return -EINVAL;
  3294. }
  3295. mddev->delta_disks = n - olddisks;
  3296. mddev->raid_disks = n;
  3297. mddev->reshape_backwards = (mddev->delta_disks < 0);
  3298. } else
  3299. mddev->raid_disks = n;
  3300. return rv ? rv : len;
  3301. }
  3302. static struct md_sysfs_entry md_raid_disks =
  3303. __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
  3304. static ssize_t
  3305. chunk_size_show(struct mddev *mddev, char *page)
  3306. {
  3307. if (mddev->reshape_position != MaxSector &&
  3308. mddev->chunk_sectors != mddev->new_chunk_sectors)
  3309. return sprintf(page, "%d (%d)\n",
  3310. mddev->new_chunk_sectors << 9,
  3311. mddev->chunk_sectors << 9);
  3312. return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
  3313. }
  3314. static ssize_t
  3315. chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
  3316. {
  3317. char *e;
  3318. unsigned long n = simple_strtoul(buf, &e, 10);
  3319. if (!*buf || (*e && *e != '\n'))
  3320. return -EINVAL;
  3321. if (mddev->pers) {
  3322. int err;
  3323. if (mddev->pers->check_reshape == NULL)
  3324. return -EBUSY;
  3325. mddev->new_chunk_sectors = n >> 9;
  3326. err = mddev->pers->check_reshape(mddev);
  3327. if (err) {
  3328. mddev->new_chunk_sectors = mddev->chunk_sectors;
  3329. return err;
  3330. }
  3331. } else {
  3332. mddev->new_chunk_sectors = n >> 9;
  3333. if (mddev->reshape_position == MaxSector)
  3334. mddev->chunk_sectors = n >> 9;
  3335. }
  3336. return len;
  3337. }
  3338. static struct md_sysfs_entry md_chunk_size =
  3339. __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
  3340. static ssize_t
  3341. resync_start_show(struct mddev *mddev, char *page)
  3342. {
  3343. if (mddev->recovery_cp == MaxSector)
  3344. return sprintf(page, "none\n");
  3345. return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
  3346. }
  3347. static ssize_t
  3348. resync_start_store(struct mddev *mddev, const char *buf, size_t len)
  3349. {
  3350. char *e;
  3351. unsigned long long n = simple_strtoull(buf, &e, 10);
  3352. if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
  3353. return -EBUSY;
  3354. if (cmd_match(buf, "none"))
  3355. n = MaxSector;
  3356. else if (!*buf || (*e && *e != '\n'))
  3357. return -EINVAL;
  3358. mddev->recovery_cp = n;
  3359. if (mddev->pers)
  3360. set_bit(MD_CHANGE_CLEAN, &mddev->flags);
  3361. return len;
  3362. }
  3363. static struct md_sysfs_entry md_resync_start =
  3364. __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
  3365. /*
  3366. * The array state can be:
  3367. *
  3368. * clear
  3369. * No devices, no size, no level
  3370. * Equivalent to STOP_ARRAY ioctl
  3371. * inactive
  3372. * May have some settings, but array is not active
  3373. * all IO results in error
  3374. * When written, doesn't tear down array, but just stops it
  3375. * suspended (not supported yet)
  3376. * All IO requests will block. The array can be reconfigured.
  3377. * Writing this, if accepted, will block until array is quiescent
  3378. * readonly
  3379. * no resync can happen. no superblocks get written.
  3380. * write requests fail
  3381. * read-auto
  3382. * like readonly, but behaves like 'clean' on a write request.
  3383. *
  3384. * clean - no pending writes, but otherwise active.
  3385. * When written to inactive array, starts without resync
  3386. * If a write request arrives then
  3387. * if metadata is known, mark 'dirty' and switch to 'active'.
  3388. * if not known, block and switch to write-pending
  3389. * If written to an active array that has pending writes, then fails.
  3390. * active
  3391. * fully active: IO and resync can be happening.
  3392. * When written to inactive array, starts with resync
  3393. *
  3394. * write-pending
  3395. * clean, but writes are blocked waiting for 'active' to be written.
  3396. *
  3397. * active-idle
  3398. * like active, but no writes have been seen for a while (100msec).
  3399. *
  3400. */
  3401. enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
  3402. write_pending, active_idle, bad_word};
  3403. static char *array_states[] = {
  3404. "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
  3405. "write-pending", "active-idle", NULL };
  3406. static int match_word(const char *word, char **list)
  3407. {
  3408. int n;
  3409. for (n=0; list[n]; n++)
  3410. if (cmd_match(word, list[n]))
  3411. break;
  3412. return n;
  3413. }
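/*
 * Illustrative check, not part of md.c: match_word() returns the index of
 * the matching list entry, or the index of the terminating NULL (here
 * bad_word) when nothing matches.
 */
#include <assert.h>

static void match_word_demo(void)
{
	assert(match_word("read-auto\n", array_states) == read_auto);
	assert(match_word("bogus", array_states) == bad_word);
}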
  3414. static ssize_t
  3415. array_state_show(struct mddev *mddev, char *page)
  3416. {
  3417. enum array_state st = inactive;
  3418. if (mddev->pers)
  3419. switch(mddev->ro) {
  3420. case 1:
  3421. st = readonly;
  3422. break;
  3423. case 2:
  3424. st = read_auto;
  3425. break;
  3426. case 0:
  3427. if (mddev->in_sync)
  3428. st = clean;
  3429. else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
  3430. st = write_pending;
  3431. else if (mddev->safemode)
  3432. st = active_idle;
  3433. else
  3434. st = active;
  3435. }
  3436. else {
  3437. if (list_empty(&mddev->disks) &&
  3438. mddev->raid_disks == 0 &&
  3439. mddev->dev_sectors == 0)
  3440. st = clear;
  3441. else
  3442. st = inactive;
  3443. }
  3444. return sprintf(page, "%s\n", array_states[st]);
  3445. }
  3446. static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
  3447. static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
  3448. static int do_md_run(struct mddev * mddev);
  3449. static int restart_array(struct mddev *mddev);
  3450. static ssize_t
  3451. array_state_store(struct mddev *mddev, const char *buf, size_t len)
  3452. {
  3453. int err = -EINVAL;
  3454. enum array_state st = match_word(buf, array_states);
  3455. switch(st) {
  3456. case bad_word:
  3457. break;
  3458. case clear:
  3459. /* stopping an active array */
  3460. err = do_md_stop(mddev, 0, NULL);
  3461. break;
  3462. case inactive:
  3463. /* stopping an active array */
  3464. if (mddev->pers)
  3465. err = do_md_stop(mddev, 2, NULL);
  3466. else
  3467. err = 0; /* already inactive */
  3468. break;
  3469. case suspended:
  3470. break; /* not supported yet */
  3471. case readonly:
  3472. if (mddev->pers)
  3473. err = md_set_readonly(mddev, NULL);
  3474. else {
  3475. mddev->ro = 1;
  3476. set_disk_ro(mddev->gendisk, 1);
  3477. err = do_md_run(mddev);
  3478. }
  3479. break;
  3480. case read_auto:
  3481. if (mddev->pers) {
  3482. if (mddev->ro == 0)
  3483. err = md_set_readonly(mddev, NULL);
  3484. else if (mddev->ro == 1)
  3485. err = restart_array(mddev);
  3486. if (err == 0) {
  3487. mddev->ro = 2;
  3488. set_disk_ro(mddev->gendisk, 0);
  3489. }
  3490. } else {
  3491. mddev->ro = 2;
  3492. err = do_md_run(mddev);
  3493. }
  3494. break;
  3495. case clean:
  3496. if (mddev->pers) {
  3497. restart_array(mddev);
  3498. spin_lock_irq(&mddev->write_lock);
  3499. if (atomic_read(&mddev->writes_pending) == 0) {
  3500. if (mddev->in_sync == 0) {
  3501. mddev->in_sync = 1;
  3502. if (mddev->safemode == 1)
  3503. mddev->safemode = 0;
  3504. set_bit(MD_CHANGE_CLEAN, &mddev->flags);
  3505. }
  3506. err = 0;
  3507. } else
  3508. err = -EBUSY;
  3509. spin_unlock_irq(&mddev->write_lock);
  3510. } else
  3511. err = -EINVAL;
  3512. break;
  3513. case active:
  3514. if (mddev->pers) {
  3515. restart_array(mddev);
  3516. clear_bit(MD_CHANGE_PENDING, &mddev->flags);
  3517. wake_up(&mddev->sb_wait);
  3518. err = 0;
  3519. } else {
  3520. mddev->ro = 0;
  3521. set_disk_ro(mddev->gendisk, 0);
  3522. err = do_md_run(mddev);
  3523. }
  3524. break;
  3525. case write_pending:
  3526. case active_idle:
  3527. /* these cannot be set */
  3528. break;
  3529. }
  3530. if (err)
  3531. return err;
  3532. else {
  3533. if (mddev->hold_active == UNTIL_IOCTL)
  3534. mddev->hold_active = 0;
  3535. sysfs_notify_dirent_safe(mddev->sysfs_state);
  3536. return len;
  3537. }
  3538. }
  3539. static struct md_sysfs_entry md_array_state =
  3540. __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
  3541. static ssize_t
  3542. max_corrected_read_errors_show(struct mddev *mddev, char *page) {
  3543. return sprintf(page, "%d\n",
  3544. atomic_read(&mddev->max_corr_read_errors));
  3545. }
  3546. static ssize_t
  3547. max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
  3548. {
  3549. char *e;
  3550. unsigned long n = simple_strtoul(buf, &e, 10);
  3551. if (*buf && (*e == 0 || *e == '\n')) {
  3552. atomic_set(&mddev->max_corr_read_errors, n);
  3553. return len;
  3554. }
  3555. return -EINVAL;
  3556. }
  3557. static struct md_sysfs_entry max_corr_read_errors =
  3558. __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
  3559. max_corrected_read_errors_store);
  3560. static ssize_t
  3561. null_show(struct mddev *mddev, char *page)
  3562. {
  3563. return -EINVAL;
  3564. }
  3565. static ssize_t
  3566. new_dev_store(struct mddev *mddev, const char *buf, size_t len)
  3567. {
  3568. /* buf must be %d:%d\n? giving major and minor numbers */
  3569. /* The new device is added to the array.
  3570. * If the array has a persistent superblock, we read the
  3571. * superblock to initialise info and check validity.
  3572. * Otherwise, only checking done is that in bind_rdev_to_array,
  3573. * which mainly checks size.
  3574. */
  3575. char *e;
  3576. int major = simple_strtoul(buf, &e, 10);
  3577. int minor;
  3578. dev_t dev;
  3579. struct md_rdev *rdev;
  3580. int err;
  3581. if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
  3582. return -EINVAL;
  3583. minor = simple_strtoul(e+1, &e, 10);
  3584. if (*e && *e != '\n')
  3585. return -EINVAL;
  3586. dev = MKDEV(major, minor);
  3587. if (major != MAJOR(dev) ||
  3588. minor != MINOR(dev))
  3589. return -EOVERFLOW;
  3590. if (mddev->persistent) {
  3591. rdev = md_import_device(dev, mddev->major_version,
  3592. mddev->minor_version);
  3593. if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
  3594. struct md_rdev *rdev0
  3595. = list_entry(mddev->disks.next,
  3596. struct md_rdev, same_set);
  3597. err = super_types[mddev->major_version]
  3598. .load_super(rdev, rdev0, mddev->minor_version);
  3599. if (err < 0)
  3600. goto out;
  3601. }
  3602. } else if (mddev->external)
  3603. rdev = md_import_device(dev, -2, -1);
  3604. else
  3605. rdev = md_import_device(dev, -1, -1);
  3606. if (IS_ERR(rdev))
  3607. return PTR_ERR(rdev);
  3608. err = bind_rdev_to_array(rdev, mddev);
  3609. out:
  3610. if (err)
  3611. export_rdev(rdev);
  3612. return err ? err : len;
  3613. }
  3614. static struct md_sysfs_entry md_new_device =
  3615. __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
  3616. static ssize_t
  3617. bitmap_store(struct mddev *mddev, const char *buf, size_t len)
  3618. {
  3619. char *end;
  3620. unsigned long chunk, end_chunk;
  3621. if (!mddev->bitmap)
  3622. goto out;
  3623. /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
  3624. while (*buf) {
  3625. chunk = end_chunk = simple_strtoul(buf, &end, 0);
  3626. if (buf == end) break;
  3627. if (*end == '-') { /* range */
  3628. buf = end + 1;
  3629. end_chunk = simple_strtoul(buf, &end, 0);
  3630. if (buf == end) break;
  3631. }
  3632. if (*end && !isspace(*end)) break;
  3633. bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
  3634. buf = skip_spaces(end);
  3635. }
  3636. bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
  3637. out:
  3638. return len;
  3639. }
  3640. static struct md_sysfs_entry md_bitmap =
  3641. __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
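/*
 * Illustrative userspace sketch, not part of md.c: the token grammar
 * bitmap_store() accepts, e.g. "10 200-220 4096", where each token dirties
 * one chunk or an inclusive range of chunks.
 */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

static void parse_chunk_list_demo(const char *buf)
{
	char *end;
	unsigned long chunk, end_chunk;

	while (*buf) {
		chunk = end_chunk = strtoul(buf, &end, 0);
		if (buf == end)
			break;
		if (*end == '-') {		/* "<chunk>-<chunk>" range form */
			buf = end + 1;
			end_chunk = strtoul(buf, &end, 0);
			if (buf == end)
				break;
		}
		if (*end && !isspace((unsigned char)*end))
			break;			/* trailing junk: stop parsing */
		printf("dirty chunks %lu..%lu\n", chunk, end_chunk);
		while (isspace((unsigned char)*end))
			end++;			/* skip_spaces() equivalent */
		buf = end;
	}
}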
  3642. static ssize_t
  3643. size_show(struct mddev *mddev, char *page)
  3644. {
  3645. return sprintf(page, "%llu\n",
  3646. (unsigned long long)mddev->dev_sectors / 2);
  3647. }
  3648. static int update_size(struct mddev *mddev, sector_t num_sectors);
  3649. static ssize_t
  3650. size_store(struct mddev *mddev, const char *buf, size_t len)
  3651. {
  3652. /* If array is inactive, we can reduce the component size, but
  3653. * not increase it (except from 0).
  3654. * If array is active, we can try an on-line resize
  3655. */
  3656. sector_t sectors;
  3657. int err = strict_blocks_to_sectors(buf, &sectors);
  3658. if (err < 0)
  3659. return err;
  3660. if (mddev->pers) {
  3661. err = update_size(mddev, sectors);
  3662. md_update_sb(mddev, 1);
  3663. } else {
  3664. if (mddev->dev_sectors == 0 ||
  3665. mddev->dev_sectors > sectors)
  3666. mddev->dev_sectors = sectors;
  3667. else
  3668. err = -ENOSPC;
  3669. }
  3670. return err ? err : len;
  3671. }
  3672. static struct md_sysfs_entry md_size =
  3673. __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
  3674. /* Metadata version.
  3675. * This is one of
  3676. * 'none' for arrays with no metadata (good luck...)
  3677. * 'external' for arrays with externally managed metadata,
  3678. * or N.M for internally known formats
  3679. */
  3680. static ssize_t
  3681. metadata_show(struct mddev *mddev, char *page)
  3682. {
  3683. if (mddev->persistent)
  3684. return sprintf(page, "%d.%d\n",
  3685. mddev->major_version, mddev->minor_version);
  3686. else if (mddev->external)
  3687. return sprintf(page, "external:%s\n", mddev->metadata_type);
  3688. else
  3689. return sprintf(page, "none\n");
  3690. }
  3691. static ssize_t
  3692. metadata_store(struct mddev *mddev, const char *buf, size_t len)
  3693. {
  3694. int major, minor;
  3695. char *e;
  3696. /* Changing the details of 'external' metadata is
  3697. * always permitted. Otherwise there must be
  3698. * no devices attached to the array.
  3699. */
  3700. if (mddev->external && strncmp(buf, "external:", 9) == 0)
  3701. ;
  3702. else if (!list_empty(&mddev->disks))
  3703. return -EBUSY;
  3704. if (cmd_match(buf, "none")) {
  3705. mddev->persistent = 0;
  3706. mddev->external = 0;
  3707. mddev->major_version = 0;
  3708. mddev->minor_version = 90;
  3709. return len;
  3710. }
  3711. if (strncmp(buf, "external:", 9) == 0) {
  3712. size_t namelen = len-9;
  3713. if (namelen >= sizeof(mddev->metadata_type))
  3714. namelen = sizeof(mddev->metadata_type)-1;
  3715. strncpy(mddev->metadata_type, buf+9, namelen);
  3716. mddev->metadata_type[namelen] = 0;
  3717. if (namelen && mddev->metadata_type[namelen-1] == '\n')
  3718. mddev->metadata_type[--namelen] = 0;
  3719. mddev->persistent = 0;
  3720. mddev->external = 1;
  3721. mddev->major_version = 0;
  3722. mddev->minor_version = 90;
  3723. return len;
  3724. }
  3725. major = simple_strtoul(buf, &e, 10);
  3726. if (e==buf || *e != '.')
  3727. return -EINVAL;
  3728. buf = e+1;
  3729. minor = simple_strtoul(buf, &e, 10);
  3730. if (e==buf || (*e && *e != '\n') )
  3731. return -EINVAL;
  3732. if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
  3733. return -ENOENT;
  3734. mddev->major_version = major;
  3735. mddev->minor_version = minor;
  3736. mddev->persistent = 1;
  3737. mddev->external = 0;
  3738. return len;
  3739. }
  3740. static struct md_sysfs_entry md_metadata =
  3741. __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static ssize_t
action_show(struct mddev *mddev, char *page)
{
	char *type = "idle";
	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		type = "frozen";
	else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				type = "check";
			else
				type = "repair";
		} else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			type = "recover";
	}
	return sprintf(page, "%s\n", type);
}

static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (cmd_match(page, "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (cmd_match(page, "recover")) {
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	} else if (cmd_match(page, "reshape")) {
		int err;
		if (mddev->pers->start_reshape == NULL)
			return -EINVAL;
		err = mddev->pers->start_reshape(mddev);
		if (err)
			return err;
		sysfs_notify(&mddev->kobj, NULL, "degraded");
	} else {
		if (cmd_match(page, "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!cmd_match(page, "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	return len;
}

static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);

static ssize_t
last_sync_action_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%s\n", mddev->last_sync_action);
}

static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);

static ssize_t
mismatch_cnt_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)
		       atomic64_read(&mddev->resync_mismatches));
}

static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
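
/*
 * sync_speed_min/sync_speed_max: per-array resync throttling, in
 * KiB/sec.  A value of 0 (written as "system") means "use the
 * system-wide default", i.e. the speed_limit_min/speed_limit_max
 * sysctls under /proc/sys/dev/raid/.
 */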
static ssize_t
sync_min_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_min(mddev),
		       mddev->sync_speed_min ? "local": "system");
}

static ssize_t
sync_min_store(struct mddev *mddev, const char *buf, size_t len)
{
	int min;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_min = 0;
		return len;
	}
	min = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || min <= 0)
		return -EINVAL;
	mddev->sync_speed_min = min;
	return len;
}

static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);

static ssize_t
sync_max_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_max(mddev),
		       mddev->sync_speed_max ? "local": "system");
}

static ssize_t
sync_max_store(struct mddev *mddev, const char *buf, size_t len)
{
	int max;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_max = 0;
		return len;
	}
	max = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || max <= 0)
		return -EINVAL;
	mddev->sync_speed_max = max;
	return len;
}

static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);

static ssize_t
degraded_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);

static ssize_t
sync_force_parallel_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->parallel_resync);
}

static ssize_t
sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
{
	long n;

	if (kstrtol(buf, 10, &n))
		return -EINVAL;

	if (n != 0 && n != 1)
		return -EINVAL;

	mddev->parallel_resync = n;

	if (mddev->sync_thread)
		wake_up(&resync_wait);

	return len;
}

/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
       sync_force_parallel_show, sync_force_parallel_store);
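
/*
 * sync_speed: current resync rate.  The mark counters below are kept
 * in 512-byte sectors, so sectors/second divided by 2 yields KiB/sec;
 * that is what the final "db/dt/2" computes.
 */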
static ssize_t
sync_speed_show(struct mddev *mddev, char *page)
{
	unsigned long resync, dt, db;

	if (mddev->curr_resync == 0)
		return sprintf(page, "none\n");
	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
	dt = (jiffies - mddev->resync_mark) / HZ;
	if (!dt) dt++;
	db = resync - mddev->resync_mark_cnt;
	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}

static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);

static ssize_t
sync_completed_show(struct mddev *mddev, char *page)
{
	unsigned long long max_sectors, resync;

	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return sprintf(page, "none\n");

	if (mddev->curr_resync == 1 ||
	    mddev->curr_resync == 2)
		return sprintf(page, "delayed\n");

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	resync = mddev->curr_resync_completed;
	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}

static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
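
/*
 * sync_min/sync_max (fields resync_min/resync_max) bound the sector
 * range that a requested resync/check will cover.  Despite the
 * similar names they are unrelated to sync_speed_min/sync_speed_max
 * above, which only throttle throughput.
 */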
static ssize_t
min_sync_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long long min;
	if (kstrtoull(buf, 10, &min))
		return -EINVAL;
	if (min > mddev->resync_max)
		return -EINVAL;
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	/* Must be a multiple of chunk_size */
	if (mddev->chunk_sectors) {
		sector_t temp = min;
		if (sector_div(temp, mddev->chunk_sectors))
			return -EINVAL;
	}
	mddev->resync_min = min;

	return len;
}

static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);

static ssize_t
max_sync_show(struct mddev *mddev, char *page)
{
	if (mddev->resync_max == MaxSector)
		return sprintf(page, "max\n");
	else
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (strncmp(buf, "max", 3) == 0)
		mddev->resync_max = MaxSector;
	else {
		unsigned long long max;
		if (kstrtoull(buf, 10, &max))
			return -EINVAL;
		if (max < mddev->resync_min)
			return -EINVAL;
		if (max < mddev->resync_max &&
		    mddev->ro == 0 &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			return -EBUSY;

		/* Must be a multiple of chunk_size */
		if (mddev->chunk_sectors) {
			sector_t temp = max;
			if (sector_div(temp, mddev->chunk_sectors))
				return -EINVAL;
		}
		mddev->resync_max = max;
	}
	wake_up(&mddev->recovery_wait);
	return len;
}

static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
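
/*
 * suspend_lo/suspend_hi demarcate a sector range in which writes are
 * temporarily held off while the personality quiesces; user space
 * (for instance mdadm, while handling a critical reshape window)
 * moves the window by writing new sector offsets to these files.
 */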
static ssize_t
suspend_lo_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}

static ssize_t
suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	unsigned long long old = mddev->suspend_lo;

	if (mddev->pers == NULL ||
	    mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;

	mddev->suspend_lo = new;
	if (new >= old)
		/* Shrinking suspended region */
		mddev->pers->quiesce(mddev, 2);
	else {
		/* Expanding suspended region - need to wait */
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
	}
	return len;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);

static ssize_t
suspend_hi_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}

static ssize_t
suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	unsigned long long old = mddev->suspend_hi;

	if (mddev->pers == NULL ||
	    mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;

	mddev->suspend_hi = new;
	if (new <= old)
		/* Shrinking suspended region */
		mddev->pers->quiesce(mddev, 2);
	else {
		/* Expanding suspended region - need to wait */
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
	}
	return len;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);

static ssize_t
reshape_position_show(struct mddev *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->reshape_position);
	strcpy(page, "none\n");
	return 5;
}

static ssize_t
reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
{
	struct md_rdev *rdev;
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);

	if (mddev->pers)
		return -EBUSY;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	mddev->reshape_position = new;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	rdev_for_each(rdev, mddev)
		rdev->new_data_offset = rdev->data_offset;
	return len;
}

static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
       reshape_position_store);

static ssize_t
reshape_direction_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%s\n",
		       mddev->reshape_backwards ? "backwards" : "forwards");
}

static ssize_t
reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
{
	int backwards = 0;
	if (cmd_match(buf, "forwards"))
		backwards = 0;
	else if (cmd_match(buf, "backwards"))
		backwards = 1;
	else
		return -EINVAL;
	if (mddev->reshape_backwards == backwards)
		return len;

	/* check if we are allowed to change */
	if (mddev->delta_disks)
		return -EBUSY;

	if (mddev->persistent &&
	    mddev->major_version == 0)
		return -EINVAL;

	mddev->reshape_backwards = backwards;
	return len;
}

static struct md_sysfs_entry md_reshape_direction =
__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
       reshape_direction_store);

static ssize_t
array_size_show(struct mddev *mddev, char *page)
{
	if (mddev->external_size)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->array_sectors/2);
	else
		return sprintf(page, "default\n");
}

static ssize_t
array_size_store(struct mddev *mddev, const char *buf, size_t len)
{
	sector_t sectors;

	if (strncmp(buf, "default", 7) == 0) {
		if (mddev->pers)
			sectors = mddev->pers->size(mddev, 0, 0);
		else
			sectors = mddev->array_sectors;

		mddev->external_size = 0;
	} else {
		if (strict_blocks_to_sectors(buf, &sectors) < 0)
			return -EINVAL;
		if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
			return -E2BIG;

		mddev->external_size = 1;
	}

	mddev->array_sectors = sectors;
	if (mddev->pers) {
		set_capacity(mddev->gendisk, mddev->array_sectors);
		revalidate_disk(mddev->gendisk);
	}
	return len;
}

static struct md_sysfs_entry md_array_size =
__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
       array_size_store);
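
/*
 * md_default_attrs are registered for every array; md_redundancy_attrs
 * are added later (see md_run below) only when the personality
 * provides sync_request, i.e. when resync/recovery is meaningful.
 */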
static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_layout.attr,
	&md_raid_disks.attr,
	&md_chunk_size.attr,
	&md_size.attr,
	&md_resync_start.attr,
	&md_metadata.attr,
	&md_new_device.attr,
	&md_safe_delay.attr,
	&md_array_state.attr,
	&md_reshape_position.attr,
	&md_reshape_direction.attr,
	&md_array_size.attr,
	&max_corr_read_errors.attr,
	NULL,
};

static struct attribute *md_redundancy_attrs[] = {
	&md_scan_mode.attr,
	&md_last_scan_mode.attr,
	&md_mismatches.attr,
	&md_sync_min.attr,
	&md_sync_max.attr,
	&md_sync_speed.attr,
	&md_sync_force_parallel.attr,
	&md_sync_completed.attr,
	&md_min_sync.attr,
	&md_max_sync.attr,
	&md_suspend_lo.attr,
	&md_suspend_hi.attr,
	&md_bitmap.attr,
	&md_degraded.attr,
	NULL,
};
static struct attribute_group md_redundancy_group = {
	.name = NULL,
	.attrs = md_redundancy_attrs,
};

static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	ssize_t rv;

	if (!entry->show)
		return -EIO;
	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);

	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->show(mddev, page);
		mddev_unlock(mddev);
	}
	mddev_put(mddev);
	return rv;
}

static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	ssize_t rv;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);
	if (entry->store == new_dev_store)
		flush_workqueue(md_misc_wq);
	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->store(mddev, page, length);
		mddev_unlock(mddev);
	}
	mddev_put(mddev);
	return rv;
}

static void md_free(struct kobject *ko)
{
	struct mddev *mddev = container_of(ko, struct mddev, kobj);

	if (mddev->sysfs_state)
		sysfs_put(mddev->sysfs_state);

	if (mddev->gendisk) {
		del_gendisk(mddev->gendisk);
		put_disk(mddev->gendisk);
	}
	if (mddev->queue)
		blk_cleanup_queue(mddev->queue);

	kfree(mddev);
}

static const struct sysfs_ops md_sysfs_ops = {
	.show	= md_attr_show,
	.store	= md_attr_store,
};
static struct kobj_type md_ktype = {
	.release	= md_free,
	.sysfs_ops	= &md_sysfs_ops,
	.default_attrs	= md_default_attrs,
};

int mdp_major = 0;

static void mddev_delayed_delete(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, del_work);

	sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
	kobject_del(&mddev->kobj);
	kobject_put(&mddev->kobj);
}
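
/*
 * md_alloc: allocate the request queue and gendisk for one array and
 * register its "md" kobject below the disk's sysfs directory.  With a
 * NULL name the device name is derived from the dev_t (md%d, or
 * md_d%d for partitionable units); otherwise the caller supplies an
 * "md_*" name.
 */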
static int md_alloc(dev_t dev, char *name)
{
	static DEFINE_MUTEX(disks_mutex);
	struct mddev *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned;
	int shift;
	int unit;
	int error;

	if (!mddev)
		return -ENODEV;

	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
	shift = partitioned ? MdpMinorShift : 0;
	unit = MINOR(mddev->unit) >> shift;

	/* wait for any previous instance of this device to be
	 * completely removed (mddev_delayed_delete).
	 */
	flush_workqueue(md_misc_wq);

	mutex_lock(&disks_mutex);
	error = -EEXIST;
	if (mddev->gendisk)
		goto abort;

	if (name) {
		/* Need to ensure that 'name' is not a duplicate.
		 */
		struct mddev *mddev2;
		spin_lock(&all_mddevs_lock);

		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
			if (mddev2->gendisk &&
			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
				spin_unlock(&all_mddevs_lock);
				goto abort;
			}
		spin_unlock(&all_mddevs_lock);
	}

	error = -ENOMEM;
	mddev->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mddev->queue)
		goto abort;
	mddev->queue->queuedata = mddev;

	blk_queue_make_request(mddev->queue, md_make_request);
	blk_set_stacking_limits(&mddev->queue->limits);

	disk = alloc_disk(1 << shift);
	if (!disk) {
		blk_cleanup_queue(mddev->queue);
		mddev->queue = NULL;
		goto abort;
	}
	disk->major = MAJOR(mddev->unit);
	disk->first_minor = unit << shift;
	if (name)
		strcpy(disk->disk_name, name);
	else if (partitioned)
		sprintf(disk->disk_name, "md_d%d", unit);
	else
		sprintf(disk->disk_name, "md%d", unit);
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
	/* Allow extended partitions.  This makes the
	 * 'mdp' device redundant, but we can't really
	 * remove it now.
	 */
	disk->flags |= GENHD_FL_EXT_DEVT;
	mddev->gendisk = disk;
	/* As soon as we call add_disk(), another thread could get
	 * through to md_open, so make sure it doesn't get too far
	 */
	mutex_lock(&mddev->open_mutex);
	add_disk(disk);

	error = kobject_init_and_add(&mddev->kobj, &md_ktype,
				     &disk_to_dev(disk)->kobj, "%s", "md");
	if (error) {
		/* This isn't possible, but as kobject_init_and_add is marked
		 * __must_check, we must do something with the result
		 */
		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
		       disk->disk_name);
		error = 0;
	}
	if (mddev->kobj.sd &&
	    sysfs_create_group(&mddev->kobj, &md_bitmap_group))
		printk(KERN_DEBUG "pointless warning\n");
	mutex_unlock(&mddev->open_mutex);
 abort:
	mutex_unlock(&disks_mutex);
	if (!error && mddev->kobj.sd) {
		kobject_uevent(&mddev->kobj, KOBJ_ADD);
		mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
	}
	mddev_put(mddev);
	return error;
}

static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
	md_alloc(dev, NULL);
	return NULL;
}
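
/*
 * add_named_array is registered elsewhere in this file as the handler
 * for the md_mod "new_array" module parameter; illustratively,
 *
 *	echo md_home > /sys/module/md_mod/parameters/new_array
 *
 * creates a fresh array whose gendisk is named "md_home", using a
 * large free minor number.
 */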
static int add_named_array(const char *val, struct kernel_param *kp)
{
	/* val must be "md_*" where * is not all digits.
	 * We allocate an array with a large free minor number, and
	 * set the name to val.  val must not already be an active name.
	 */
	int len = strlen(val);
	char buf[DISK_NAME_LEN];

	while (len && val[len-1] == '\n')
		len--;
	if (len >= DISK_NAME_LEN)
		return -E2BIG;
	strlcpy(buf, val, len+1);
	if (strncmp(buf, "md_", 3) != 0)
		return -EINVAL;
	return md_alloc(0, buf);
}

static void md_safemode_timeout(unsigned long data)
{
	struct mddev *mddev = (struct mddev *) data;

	if (!atomic_read(&mddev->writes_pending)) {
		mddev->safemode = 1;
		if (mddev->external)
			sysfs_notify_dirent_safe(mddev->sysfs_state);
	}
	md_wakeup_thread(mddev->thread);
}
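
/*
 * md_run: bring a configured array to life.  It analyzes the member
 * superblocks if needed, loads and binds the personality module,
 * sanity-checks metadata/data placement on each rdev, creates the
 * optional bitmap and the redundancy sysfs group, and finally marks
 * the array ready and kicks the recovery thread.
 */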
static int start_dirty_degraded;

int md_run(struct mddev *mddev)
{
	int err;
	struct md_rdev *rdev;
	struct md_personality *pers;

	if (list_empty(&mddev->disks))
		/* cannot run an array with no devices.. */
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	/* Cannot run until previous stop completes properly */
	if (mddev->sysfs_active)
		return -EBUSY;

	/*
	 * Analyze all RAID superblock(s)
	 */
	if (!mddev->raid_disks) {
		if (!mddev->persistent)
			return -EINVAL;
		analyze_sbs(mddev);
	}

	if (mddev->level != LEVEL_NONE)
		request_module("md-level-%d", mddev->level);
	else if (mddev->clevel[0])
		request_module("md-%s", mddev->clevel);

	/*
	 * Drop all container device buffers, from now on
	 * the only valid external interface is through the md
	 * device.
	 */
	rdev_for_each(rdev, mddev) {
		if (test_bit(Faulty, &rdev->flags))
			continue;
		sync_blockdev(rdev->bdev);
		invalidate_bdev(rdev->bdev);

		/* perform some consistency tests on the device.
		 * We don't want the data to overlap the metadata,
		 * Internal Bitmap issues have been handled elsewhere.
		 */
		if (rdev->meta_bdev) {
			/* Nothing to check */;
		} else if (rdev->data_offset < rdev->sb_start) {
			if (mddev->dev_sectors &&
			    rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start) {
				printk("md: %s: data overlaps metadata\n",
				       mdname(mddev));
				return -EINVAL;
			}
		} else {
			if (rdev->sb_start + rdev->sb_size/512
			    > rdev->data_offset) {
				printk("md: %s: metadata overlaps data\n",
				       mdname(mddev));
				return -EINVAL;
			}
		}
		sysfs_notify_dirent_safe(rdev->sysfs_state);
	}

	if (mddev->bio_set == NULL)
		mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);

	spin_lock(&pers_lock);
	pers = find_pers(mddev->level, mddev->clevel);
	if (!pers || !try_module_get(pers->owner)) {
		spin_unlock(&pers_lock);
		if (mddev->level != LEVEL_NONE)
			printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
			       mddev->level);
		else
			printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
			       mddev->clevel);
		return -EINVAL;
	}
	mddev->pers = pers;
	spin_unlock(&pers_lock);
	if (mddev->level != pers->level) {
		mddev->level = pers->level;
		mddev->new_level = pers->level;
	}
	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));

	if (mddev->reshape_position != MaxSector &&
	    pers->start_reshape == NULL) {
		/* This personality cannot handle reshaping... */
		mddev->pers = NULL;
		module_put(pers->owner);
		return -EINVAL;
	}

	if (pers->sync_request) {
		/* Warn if this is a potentially silly
		 * configuration.
		 */
		char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
		struct md_rdev *rdev2;
		int warned = 0;

		rdev_for_each(rdev, mddev)
			rdev_for_each(rdev2, mddev) {
				if (rdev < rdev2 &&
				    rdev->bdev->bd_contains ==
				    rdev2->bdev->bd_contains) {
					printk(KERN_WARNING
					       "%s: WARNING: %s appears to be"
					       " on the same physical disk as"
					       " %s.\n",
					       mdname(mddev),
					       bdevname(rdev->bdev,b),
					       bdevname(rdev2->bdev,b2));
					warned = 1;
				}
			}

		if (warned)
			printk(KERN_WARNING
			       "True protection against single-disk"
			       " failure might be compromised.\n");
	}

	mddev->recovery = 0;
	/* may be over-ridden by personality */
	mddev->resync_max_sectors = mddev->dev_sectors;

	mddev->ok_start_degraded = start_dirty_degraded;

	if (start_readonly && mddev->ro == 0)
		mddev->ro = 2; /* read-only, but switch on first write */

	err = mddev->pers->run(mddev);
	if (err)
		printk(KERN_ERR "md: pers->run() failed ...\n");
	else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
		WARN_ONCE(!mddev->external_size, "%s: default size too small,"
			  " but 'external_size' not in effect?\n", __func__);
		printk(KERN_ERR
		       "md: invalid array_size %llu > default size %llu\n",
		       (unsigned long long)mddev->array_sectors / 2,
		       (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
		err = -EINVAL;
		mddev->pers->stop(mddev);
	}
	if (err == 0 && mddev->pers->sync_request &&
	    (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
		err = bitmap_create(mddev);
		if (err) {
			printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
			       mdname(mddev), err);
			mddev->pers->stop(mddev);
		}
	}
	if (err) {
		module_put(mddev->pers->owner);
		mddev->pers = NULL;
		bitmap_destroy(mddev);
		return err;
	}
	if (mddev->pers->sync_request) {
		if (mddev->kobj.sd &&
		    sysfs_create_group(&mddev->kobj, &md_redundancy_group))
			printk(KERN_WARNING
			       "md: cannot register extra attributes for %s\n",
			       mdname(mddev));
		mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
		mddev->ro = 0;

	atomic_set(&mddev->writes_pending,0);
	atomic_set(&mddev->max_corr_read_errors,
		   MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
	mddev->safemode = 0;
	mddev->safemode_timer.function = md_safemode_timeout;
	mddev->safemode_timer.data = (unsigned long) mddev;
	mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
	mddev->in_sync = 1;
	smp_wmb();
	mddev->ready = 1;
	rdev_for_each(rdev, mddev)
		if (rdev->raid_disk >= 0)
			if (sysfs_link_rdev(mddev, rdev))
				/* failure here is OK */;

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);

	if (mddev->flags & MD_UPDATE_SB_FLAGS)
		md_update_sb(mddev, 0);

	md_new_event(mddev);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	sysfs_notify(&mddev->kobj, NULL, "degraded");
	return 0;
}
EXPORT_SYMBOL_GPL(md_run);
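
/*
 * do_md_run wraps md_run for arrays started through this driver's own
 * interfaces: it additionally loads the write-intent bitmap, wakes the
 * array and sync threads, and publishes the new capacity to the block
 * layer and udev.
 */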
static int do_md_run(struct mddev *mddev)
{
	int err;

	err = md_run(mddev);
	if (err)
		goto out;
	err = bitmap_load(mddev);
	if (err) {
		bitmap_destroy(mddev);
		goto out;
	}

	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */

	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	mddev->changed = 1;
	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
	return err;
}

static int restart_array(struct mddev *mddev)
{
	struct gendisk *disk = mddev->gendisk;

	/* Complain if it has no devices */
	if (list_empty(&mddev->disks))
		return -ENXIO;
	if (!mddev->pers)
		return -EINVAL;
	if (!mddev->ro)
		return -EBUSY;
	mddev->safemode = 0;
	mddev->ro = 0;
	set_disk_ro(disk, 0);
	printk(KERN_INFO "md: %s switched to read-write mode.\n",
	       mdname(mddev));
	/* Kick recovery or resync if necessary */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	return 0;
}

/* similar to deny_write_access, but accounts for our holding a reference
 * to the file ourselves */
static int deny_bitmap_write_access(struct file * file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	if (atomic_read(&inode->i_writecount) > 1) {
		spin_unlock(&inode->i_lock);
		return -ETXTBSY;
	}
	atomic_set(&inode->i_writecount, -1);
	spin_unlock(&inode->i_lock);

	return 0;
}

void restore_bitmap_write_access(struct file *file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	atomic_set(&inode->i_writecount, 1);
	spin_unlock(&inode->i_lock);
}

static void md_clean(struct mddev *mddev)
{
	mddev->array_sectors = 0;
	mddev->external_size = 0;
	mddev->dev_sectors = 0;
	mddev->raid_disks = 0;
	mddev->recovery_cp = 0;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->reshape_position = MaxSector;
	mddev->external = 0;
	mddev->persistent = 0;
	mddev->level = LEVEL_NONE;
	mddev->clevel[0] = 0;
	mddev->flags = 0;
	mddev->ro = 0;
	mddev->metadata_type[0] = 0;
	mddev->chunk_sectors = 0;
	mddev->ctime = mddev->utime = 0;
	mddev->layout = 0;
	mddev->max_disks = 0;
	mddev->events = 0;
	mddev->can_decrease_events = 0;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
	mddev->new_level = LEVEL_NONE;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = 0;
	mddev->curr_resync = 0;
	atomic64_set(&mddev->resync_mismatches, 0);
	mddev->suspend_lo = mddev->suspend_hi = 0;
	mddev->sync_speed_min = mddev->sync_speed_max = 0;
	mddev->recovery = 0;
	mddev->in_sync = 0;
	mddev->changed = 0;
	mddev->degraded = 0;
	mddev->safemode = 0;
	mddev->merge_check_needed = 0;
	mddev->bitmap_info.offset = 0;
	mddev->bitmap_info.default_offset = 0;
	mddev->bitmap_info.default_space = 0;
	mddev->bitmap_info.chunksize = 0;
	mddev->bitmap_info.daemon_sleep = 0;
	mddev->bitmap_info.max_write_behind = 0;
}

static void __md_stop_writes(struct mddev *mddev)
{
	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	if (mddev->sync_thread) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		md_reap_sync_thread(mddev);
	}

	del_timer_sync(&mddev->safemode_timer);

	bitmap_flush(mddev);
	md_super_wait(mddev);

	if (mddev->ro == 0 &&
	    (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) {
		/* mark array as shutdown cleanly */
		mddev->in_sync = 1;
		md_update_sb(mddev, 1);
	}
}

void md_stop_writes(struct mddev *mddev)
{
	mddev_lock(mddev);
	__md_stop_writes(mddev);
	mddev_unlock(mddev);
}
EXPORT_SYMBOL_GPL(md_stop_writes);
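
/*
 * The stop path is layered: __md_stop_writes (above) freezes recovery
 * and flushes dirty state to the members, while __md_stop tears down
 * the personality itself; md_set_readonly and do_md_stop below combine
 * them for the externally visible transitions.
 */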
static void __md_stop(struct mddev *mddev)
{
	mddev->ready = 0;
	mddev->pers->stop(mddev);
	if (mddev->pers->sync_request && mddev->to_remove == NULL)
		mddev->to_remove = &md_redundancy_group;
	module_put(mddev->pers->owner);
	mddev->pers = NULL;
	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}

void md_stop(struct mddev *mddev)
{
	/* stop the array and free any attached data structures.
	 * This is called from dm-raid
	 */
	__md_stop(mddev);
	bitmap_destroy(mddev);
	if (mddev->bio_set)
		bioset_free(mddev->bio_set);
}
EXPORT_SYMBOL_GPL(md_stop);

static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
{
	int err = 0;

	mutex_lock(&mddev->open_mutex);
	if (atomic_read(&mddev->openers) > !!bdev) {
		printk("md: %s still in use.\n",mdname(mddev));
		err = -EBUSY;
		goto out;
	}
	if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
		/* Someone opened the device since we flushed it
		 * so page cache could be dirty and it is too late
		 * to flush.  So abort
		 */
		mutex_unlock(&mddev->open_mutex);
		return -EBUSY;
	}
	if (mddev->pers) {
		__md_stop_writes(mddev);

		err = -ENXIO;
		if (mddev->ro==1)
			goto out;
		mddev->ro = 1;
		set_disk_ro(mddev->gendisk, 1);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		err = 0;
	}
out:
	mutex_unlock(&mddev->open_mutex);
	return err;
}

/* mode:
 *   0 - completely stop and dis-assemble array
 *   2 - stop but do not disassemble array
 */
static int do_md_stop(struct mddev * mddev, int mode,
		      struct block_device *bdev)
{
	struct gendisk *disk = mddev->gendisk;
	struct md_rdev *rdev;

	mutex_lock(&mddev->open_mutex);
	if (atomic_read(&mddev->openers) > !!bdev ||
	    mddev->sysfs_active) {
		printk("md: %s still in use.\n",mdname(mddev));
		mutex_unlock(&mddev->open_mutex);
		return -EBUSY;
	}
	if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
		/* Someone opened the device since we flushed it
		 * so page cache could be dirty and it is too late
		 * to flush.  So abort
		 */
		mutex_unlock(&mddev->open_mutex);
		return -EBUSY;
	}
	if (mddev->pers) {
		if (mddev->ro)
			set_disk_ro(disk, 0);

		__md_stop_writes(mddev);
		__md_stop(mddev);
		mddev->queue->merge_bvec_fn = NULL;
		mddev->queue->backing_dev_info.congested_fn = NULL;

		/* tell userspace to handle 'inactive' */
		sysfs_notify_dirent_safe(mddev->sysfs_state);

		rdev_for_each(rdev, mddev)
			if (rdev->raid_disk >= 0)
				sysfs_unlink_rdev(mddev, rdev);

		set_capacity(disk, 0);
		mutex_unlock(&mddev->open_mutex);
		mddev->changed = 1;
		revalidate_disk(disk);

		if (mddev->ro)
			mddev->ro = 0;
	} else
		mutex_unlock(&mddev->open_mutex);
	/*
	 * Free resources if final stop
	 */
	if (mode == 0) {
		printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));

		bitmap_destroy(mddev);
		if (mddev->bitmap_info.file) {
			restore_bitmap_write_access(mddev->bitmap_info.file);
			fput(mddev->bitmap_info.file);
			mddev->bitmap_info.file = NULL;
		}
		mddev->bitmap_info.offset = 0;

		export_array(mddev);

		md_clean(mddev);
		kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
		if (mddev->hold_active == UNTIL_STOP)
			mddev->hold_active = 0;
	}
	blk_integrity_unregister(disk);
	md_new_event(mddev);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	return 0;
}
#ifndef MODULE
static void autorun_array(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int err;

	if (list_empty(&mddev->disks))
		return;

	printk(KERN_INFO "md: running: ");

	rdev_for_each(rdev, mddev) {
		char b[BDEVNAME_SIZE];
		printk("<%s>", bdevname(rdev->bdev,b));
	}
	printk("\n");

	err = do_md_run(mddev);
	if (err) {
		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
		do_md_stop(mddev, 0, NULL);
	}
}

/*
 * let's try to run arrays based on all disks that have arrived
 * until now. (those are in pending_raid_disks)
 *
 * the method: pick the first pending disk, collect all disks with
 * the same UUID, remove all from the pending list and put them into
 * the 'same_array' list. Then order this list based on superblock
 * update time (freshest comes first), kick out 'old' disks and
 * compare superblocks. If everything's fine then run it.
 *
 * If "unit" is allocated, then bump its reference count
 */
static void autorun_devices(int part)
{
	struct md_rdev *rdev0, *rdev, *tmp;
	struct mddev *mddev;
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "md: autorun ...\n");
	while (!list_empty(&pending_raid_disks)) {
		int unit;
		dev_t dev;
		LIST_HEAD(candidates);
		rdev0 = list_entry(pending_raid_disks.next,
				   struct md_rdev, same_set);

		printk(KERN_INFO "md: considering %s ...\n",
		       bdevname(rdev0->bdev,b));
		INIT_LIST_HEAD(&candidates);
		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
			if (super_90_load(rdev, rdev0, 0) >= 0) {
				printk(KERN_INFO "md: adding %s ...\n",
				       bdevname(rdev->bdev,b));
				list_move(&rdev->same_set, &candidates);
			}
		/*
		 * now we have a set of devices, with all of them having
		 * mostly sane superblocks. It's time to allocate the
		 * mddev.
		 */
		if (part) {
			dev = MKDEV(mdp_major,
				    rdev0->preferred_minor << MdpMinorShift);
			unit = MINOR(dev) >> MdpMinorShift;
		} else {
			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
			unit = MINOR(dev);
		}
		if (rdev0->preferred_minor != unit) {
			printk(KERN_INFO "md: unit number in %s is bad: %d\n",
			       bdevname(rdev0->bdev, b), rdev0->preferred_minor);
			break;
		}

		md_probe(dev, NULL, NULL);
		mddev = mddev_find(dev);
		if (!mddev || !mddev->gendisk) {
			if (mddev)
				mddev_put(mddev);
			printk(KERN_ERR
			       "md: cannot allocate memory for md drive.\n");
			break;
		}
		if (mddev_lock(mddev))
			printk(KERN_WARNING "md: %s locked, cannot run\n",
			       mdname(mddev));
		else if (mddev->raid_disks || mddev->major_version
			 || !list_empty(&mddev->disks)) {
			printk(KERN_WARNING
			       "md: %s already running, cannot run %s\n",
			       mdname(mddev), bdevname(rdev0->bdev,b));
			mddev_unlock(mddev);
		} else {
			printk(KERN_INFO "md: created %s\n", mdname(mddev));
			mddev->persistent = 1;
			rdev_for_each_list(rdev, tmp, &candidates) {
				list_del_init(&rdev->same_set);
				if (bind_rdev_to_array(rdev, mddev))
					export_rdev(rdev);
			}
			autorun_array(mddev);
			mddev_unlock(mddev);
		}
		/* on success, candidates will be empty, on error
		 * it won't...
		 */
		rdev_for_each_list(rdev, tmp, &candidates) {
			list_del_init(&rdev->same_set);
			export_rdev(rdev);
		}
		mddev_put(mddev);
	}
	printk(KERN_INFO "md: ... autorun DONE.\n");
}
#endif /* !MODULE */
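
/*
 * The helpers below back the classic md ioctls (GET_VERSION,
 * GET_ARRAY_INFO, GET_BITMAP_FILE, GET_DISK_INFO, ADD_NEW_DISK,
 * HOT_REMOVE_DISK, HOT_ADD_DISK, SET_BITMAP_FILE, SET_ARRAY_INFO),
 * dispatched from md_ioctl().
 */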
static int get_version(void __user * arg)
{
	mdu_version_t ver;

	ver.major = MD_MAJOR_VERSION;
	ver.minor = MD_MINOR_VERSION;
	ver.patchlevel = MD_PATCHLEVEL_VERSION;

	if (copy_to_user(arg, &ver, sizeof(ver)))
		return -EFAULT;

	return 0;
}
static int get_array_info(struct mddev * mddev, void __user * arg)
{
	mdu_array_info_t info;
	int nr,working,insync,failed,spare;
	struct md_rdev *rdev;

	nr = working = insync = failed = spare = 0;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		nr++;
		if (test_bit(Faulty, &rdev->flags))
			failed++;
		else {
			working++;
			if (test_bit(In_sync, &rdev->flags))
				insync++;
			else
				spare++;
		}
	}
	rcu_read_unlock();

	info.major_version = mddev->major_version;
	info.minor_version = mddev->minor_version;
	info.patch_version = MD_PATCHLEVEL_VERSION;
	info.ctime         = mddev->ctime;
	info.level         = mddev->level;
	info.size          = mddev->dev_sectors / 2;
	if (info.size != mddev->dev_sectors / 2) /* overflow */
		info.size = -1;
	info.nr_disks      = nr;
	info.raid_disks    = mddev->raid_disks;
	info.md_minor      = mddev->md_minor;
	info.not_persistent= !mddev->persistent;

	info.utime         = mddev->utime;
	info.state         = 0;
	if (mddev->in_sync)
		info.state = (1<<MD_SB_CLEAN);
	if (mddev->bitmap && mddev->bitmap_info.offset)
		info.state |= (1<<MD_SB_BITMAP_PRESENT);
	info.active_disks  = insync;
	info.working_disks = working;
	info.failed_disks  = failed;
	info.spare_disks   = spare;

	info.layout        = mddev->layout;
	info.chunk_size    = mddev->chunk_sectors << 9;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
static int get_bitmap_file(struct mddev * mddev, void __user * arg)
{
	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
	char *ptr, *buf = NULL;
	int err = -ENOMEM;

	file = kmalloc(sizeof(*file), GFP_NOIO);
	if (!file)
		goto out;

	/* bitmap disabled, zero the first byte and copy out */
	if (!mddev->bitmap || !mddev->bitmap->storage.file) {
		file->pathname[0] = '\0';
		goto copy_out;
	}

	buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
	if (!buf)
		goto out;

	ptr = d_path(&mddev->bitmap->storage.file->f_path,
		     buf, sizeof(file->pathname));
	if (IS_ERR(ptr))
		goto out;

	strcpy(file->pathname, ptr);

copy_out:
	err = 0;
	if (copy_to_user(arg, file, sizeof(*file)))
		err = -EFAULT;
out:
	kfree(buf);
	kfree(file);
	return err;
}

static int get_disk_info(struct mddev * mddev, void __user * arg)
{
	mdu_disk_info_t info;
	struct md_rdev *rdev;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	rcu_read_lock();
	rdev = find_rdev_nr_rcu(mddev, info.number);
	if (rdev) {
		info.major = MAJOR(rdev->bdev->bd_dev);
		info.minor = MINOR(rdev->bdev->bd_dev);
		info.raid_disk = rdev->raid_disk;
		info.state = 0;
		if (test_bit(Faulty, &rdev->flags))
			info.state |= (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev->flags)) {
			info.state |= (1<<MD_DISK_ACTIVE);
			info.state |= (1<<MD_DISK_SYNC);
		}
		if (test_bit(WriteMostly, &rdev->flags))
			info.state |= (1<<MD_DISK_WRITEMOSTLY);
	} else {
		info.major = info.minor = 0;
		info.raid_disk = -1;
		info.state = (1<<MD_DISK_REMOVED);
	}
	rcu_read_unlock();

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
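
/*
 * add_new_disk serves three cases, distinguished below: assembling an
 * unstarted array from devices that already carry superblocks,
 * hot-adding a spare to a running array, and adding members while
 * building a brand-new array (only permitted for major_version==0
 * superblocks).
 */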
static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	struct md_rdev *rdev;
	dev_t dev = MKDEV(info->major,info->minor);

	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
		return -EOVERFLOW;

	if (!mddev->raid_disks) {
		int err;
		/* expecting a device which has a superblock */
		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
			       "md: md_import_device returned %ld\n",
			       PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		if (!list_empty(&mddev->disks)) {
			struct md_rdev *rdev0
				= list_entry(mddev->disks.next,
					     struct md_rdev, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0) {
				printk(KERN_WARNING
				       "md: %s has different UUID to %s\n",
				       bdevname(rdev->bdev,b),
				       bdevname(rdev0->bdev,b2));
				export_rdev(rdev);
				return -EINVAL;
			}
		}
		err = bind_rdev_to_array(rdev, mddev);
		if (err)
			export_rdev(rdev);
		return err;
	}

	/*
	 * add_new_disk can be used once the array is assembled
	 * to add "hot spares".  They must already have a superblock
	 * written
	 */
	if (mddev->pers) {
		int err;
		if (!mddev->pers->hot_add_disk) {
			printk(KERN_WARNING
			       "%s: personality does not support diskops!\n",
			       mdname(mddev));
			return -EINVAL;
		}
		if (mddev->persistent)
			rdev = md_import_device(dev, mddev->major_version,
						mddev->minor_version);
		else
			rdev = md_import_device(dev, -1, -1);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
			       "md: md_import_device returned %ld\n",
			       PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		/* set saved_raid_disk if appropriate */
		if (!mddev->persistent) {
			if (info->state & (1<<MD_DISK_SYNC) &&
			    info->raid_disk < mddev->raid_disks) {
				rdev->raid_disk = info->raid_disk;
				set_bit(In_sync, &rdev->flags);
			} else
				rdev->raid_disk = -1;
		} else
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
		if ((info->state & (1<<MD_DISK_SYNC)) &&
		     rdev->raid_disk != info->raid_disk) {
			/* This was a hot-add request, but events doesn't
			 * match, so reject it.
			 */
			export_rdev(rdev);
			return -EINVAL;
		}

		if (test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = rdev->raid_disk;
		else
			rdev->saved_raid_disk = -1;

		clear_bit(In_sync, &rdev->flags); /* just to be sure */
		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
		else
			clear_bit(WriteMostly, &rdev->flags);

		rdev->raid_disk = -1;
		err = bind_rdev_to_array(rdev, mddev);
		if (!err && !mddev->pers->hot_remove_disk) {
			/* If there is hot_add_disk but no hot_remove_disk
			 * then added disks for geometry changes,
			 * and should be added immediately.
			 */
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
			err = mddev->pers->hot_add_disk(mddev, rdev);
			if (err)
				unbind_rdev_from_array(rdev);
		}
		if (err)
			export_rdev(rdev);
		else
			sysfs_notify_dirent_safe(rdev->sysfs_state);

		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (mddev->degraded)
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		if (!err)
			md_new_event(mddev);
		md_wakeup_thread(mddev->thread);
		return err;
	}

	/* otherwise, add_new_disk is only allowed
	 * for major_version==0 superblocks
	 */
	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
		       mdname(mddev));
		return -EINVAL;
	}

	if (!(info->state & (1<<MD_DISK_FAULTY))) {
		int err;
		rdev = md_import_device(dev, -1, 0);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
			       "md: error, md_import_device() returned %ld\n",
			       PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		rdev->desc_nr = info->number;
		if (info->raid_disk < mddev->raid_disks)
			rdev->raid_disk = info->raid_disk;
		else
			rdev->raid_disk = -1;

		if (rdev->raid_disk < mddev->raid_disks)
			if (info->state & (1<<MD_DISK_SYNC))
				set_bit(In_sync, &rdev->flags);

		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);

		if (!mddev->persistent) {
			printk(KERN_INFO "md: nonpersistent superblock ...\n");
			rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
		} else
			rdev->sb_start = calc_dev_sboffset(rdev);
		rdev->sectors = rdev->sb_start;

		err = bind_rdev_to_array(rdev, mddev);
		if (err) {
			export_rdev(rdev);
			return err;
		}
	}

	return 0;
}
static int hot_remove_disk(struct mddev * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	struct md_rdev *rdev;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENXIO;

	clear_bit(Blocked, &rdev->flags);
	remove_and_add_spares(mddev, rdev);

	if (rdev->raid_disk >= 0)
		goto busy;

	kick_rdev_from_array(rdev);
	md_update_sb(mddev, 1);
	md_new_event(mddev);

	return 0;
busy:
	printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
	       bdevname(rdev->bdev,b), mdname(mddev));
	return -EBUSY;
}

static int hot_add_disk(struct mddev * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	int err;
	struct md_rdev *rdev;

	if (!mddev->pers)
		return -ENODEV;

	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: HOT_ADD may only be used with"
		       " version-0 superblocks.\n",
		       mdname(mddev));
		return -EINVAL;
	}
	if (!mddev->pers->hot_add_disk) {
		printk(KERN_WARNING
		       "%s: personality does not support diskops!\n",
		       mdname(mddev));
		return -EINVAL;
	}

	rdev = md_import_device(dev, -1, 0);
	if (IS_ERR(rdev)) {
		printk(KERN_WARNING
		       "md: error, md_import_device() returned %ld\n",
		       PTR_ERR(rdev));
		return -EINVAL;
	}

	if (mddev->persistent)
		rdev->sb_start = calc_dev_sboffset(rdev);
	else
		rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;

	rdev->sectors = rdev->sb_start;

	if (test_bit(Faulty, &rdev->flags)) {
		printk(KERN_WARNING
		       "md: can not hot-add faulty %s disk to %s!\n",
		       bdevname(rdev->bdev,b), mdname(mddev));
		err = -EINVAL;
		goto abort_export;
	}
	clear_bit(In_sync, &rdev->flags);
	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	err = bind_rdev_to_array(rdev, mddev);
	if (err)
		goto abort_export;

	/*
	 * The rest should better be atomic, we can have disk failures
	 * noticed in interrupt contexts ...
	 */
	rdev->raid_disk = -1;

	md_update_sb(mddev, 1);

	/*
	 * Kick recovery, maybe this spare has to be added to the
	 * array immediately.
	 */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event(mddev);
	return 0;

abort_export:
	export_rdev(rdev);
	return err;
}
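
/*
 * set_bitmap_file: attach or detach an external bitmap file.
 * fd >= 0 pins the file (denying other writers) and creates/loads the
 * bitmap around a quiesced array; fd < 0 destroys the bitmap and
 * releases the file.
 */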
static int set_bitmap_file(struct mddev *mddev, int fd)
{
	int err;

	if (mddev->pers) {
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		/* we should be able to change the bitmap.. */
	}

	if (fd >= 0) {
		if (mddev->bitmap)
			return -EEXIST; /* cannot add when bitmap is present */
		mddev->bitmap_info.file = fget(fd);

		if (mddev->bitmap_info.file == NULL) {
			printk(KERN_ERR "%s: error: failed to get bitmap file\n",
			       mdname(mddev));
			return -EBADF;
		}

		err = deny_bitmap_write_access(mddev->bitmap_info.file);
		if (err) {
			printk(KERN_ERR "%s: error: bitmap file is already in use\n",
			       mdname(mddev));
			fput(mddev->bitmap_info.file);
			mddev->bitmap_info.file = NULL;
			return err;
		}
		mddev->bitmap_info.offset = 0; /* file overrides offset */
	} else if (mddev->bitmap == NULL)
		return -ENOENT; /* cannot remove what isn't there */
	err = 0;
	if (mddev->pers) {
		mddev->pers->quiesce(mddev, 1);
		if (fd >= 0) {
			err = bitmap_create(mddev);
			if (!err)
				err = bitmap_load(mddev);
		}
		if (fd < 0 || err) {
			bitmap_destroy(mddev);
			fd = -1; /* make sure to put the file */
		}
		mddev->pers->quiesce(mddev, 0);
	}
	if (fd < 0) {
		if (mddev->bitmap_info.file) {
			restore_bitmap_write_access(mddev->bitmap_info.file);
			fput(mddev->bitmap_info.file);
		}
		mddev->bitmap_info.file = NULL;
	}

	return err;
}
/*
 * set_array_info is used two different ways
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent, layout, chunksize determine the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 *  In this case raid_disks will be 0, and the major_version field is
 *  used to determine which style super-blocks are to be found on the devices.
 *  The minor and patch _version numbers are also kept in case the
 *  super_block handler wishes to interpret them.
 */
static int set_array_info(struct mddev * mddev, mdu_array_info_t *info)
{
	if (info->raid_disks == 0) {
		/* just setting version number for superblock loading */
		if (info->major_version < 0 ||
		    info->major_version >= ARRAY_SIZE(super_types) ||
		    super_types[info->major_version].name == NULL) {
			/* maybe try to auto-load a module? */
			printk(KERN_INFO
			       "md: superblock version %d not known\n",
			       info->major_version);
			return -EINVAL;
		}
		mddev->major_version = info->major_version;
		mddev->minor_version = info->minor_version;
		mddev->patch_version = info->patch_version;
		mddev->persistent = !info->not_persistent;
		/* ensure mddev_put doesn't delete this now that there
		 * is some minimal configuration.
		 */
		mddev->ctime         = get_seconds();
		return 0;
	}
	mddev->major_version = MD_MAJOR_VERSION;
	mddev->minor_version = MD_MINOR_VERSION;
	mddev->patch_version = MD_PATCHLEVEL_VERSION;
	mddev->ctime         = get_seconds();

	mddev->level         = info->level;
	mddev->clevel[0]     = 0;
	mddev->dev_sectors   = 2 * (sector_t)info->size;
	mddev->raid_disks    = info->raid_disks;
	/* don't set md_minor, it is determined by which /dev/md* was
	 * opened
	 */
	if (info->state & (1<<MD_SB_CLEAN))
		mddev->recovery_cp = MaxSector;
	else
		mddev->recovery_cp = 0;
	mddev->persistent    = ! info->not_persistent;
	mddev->external      = 0;

	mddev->layout        = info->layout;
	mddev->chunk_sectors = info->chunk_size >> 9;

	mddev->max_disks     = MD_SB_DISKS;

	if (mddev->persistent)
		mddev->flags = 0;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
	mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
	mddev->bitmap_info.offset = 0;

	mddev->reshape_position = MaxSector;

	/*
	 * Generate a 128 bit UUID
	 */
	get_random_bytes(mddev->uuid, 16);

	mddev->new_level = mddev->level;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->new_layout = mddev->layout;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;

	return 0;
}

void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
{
	WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);

	if (mddev->external_size)
		return;

	mddev->array_sectors = array_sectors;
}
EXPORT_SYMBOL(md_set_array_sectors);
static int update_size(struct mddev *mddev, sector_t num_sectors)
{
	struct md_rdev *rdev;
	int rv;
	int fit = (num_sectors == 0);

	if (mddev->pers->resize == NULL)
		return -EINVAL;
	/* The "num_sectors" is the number of sectors of each device that
	 * is used.  This can only make sense for arrays with redundancy.
	 * linear and raid0 always use whatever space is available. We can only
	 * consider changing this number if no resync or reconstruction is
	 * happening, and if the new size is acceptable. It must fit before the
	 * sb_start or, if that is <data_offset, it must fit before the size
	 * of each device.  If num_sectors is zero, we find the largest size
	 * that fits.
	 */
	if (mddev->sync_thread)
		return -EBUSY;

	rdev_for_each(rdev, mddev) {
		sector_t avail = rdev->sectors;

		if (fit && (num_sectors == 0 || num_sectors > avail))
			num_sectors = avail;
		if (avail < num_sectors)
			return -ENOSPC;
	}
	rv = mddev->pers->resize(mddev, num_sectors);
	if (!rv)
		revalidate_disk(mddev->gendisk);
	return rv;
}
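/*
 * Worked example (illustrative): with three members exposing 1000, 990
 * and 995 usable sectors, update_size(mddev, 0) walks the rdevs and
 * settles on num_sectors = 990 -- the largest size that fits on every
 * device -- while update_size(mddev, 996) fails with -ENOSPC because
 * the 990-sector member cannot hold it.
 */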
static int update_raid_disks(struct mddev *mddev, int raid_disks)
{
	int rv;
	struct md_rdev *rdev;

	/* change the number of raid disks */
	if (mddev->pers->check_reshape == NULL)
		return -EINVAL;
	if (raid_disks <= 0 ||
	    (mddev->max_disks && raid_disks >= mddev->max_disks))
		return -EINVAL;
	if (mddev->sync_thread || mddev->reshape_position != MaxSector)
		return -EBUSY;

	rdev_for_each(rdev, mddev) {
		if (mddev->raid_disks < raid_disks &&
		    rdev->data_offset < rdev->new_data_offset)
			return -EINVAL;
		if (mddev->raid_disks > raid_disks &&
		    rdev->data_offset > rdev->new_data_offset)
			return -EINVAL;
	}
	mddev->delta_disks = raid_disks - mddev->raid_disks;
	if (mddev->delta_disks < 0)
		mddev->reshape_backwards = 1;
	else if (mddev->delta_disks > 0)
		mddev->reshape_backwards = 0;

	rv = mddev->pers->check_reshape(mddev);
	if (rv < 0) {
		mddev->delta_disks = 0;
		mddev->reshape_backwards = 0;
	}
	return rv;
}
/*
 * update_array_info is used to change the configuration of an
 * on-line array.
 * The version, ctime, level, size, raid_disks, not_persistent, layout
 * and chunk_size fields in the info are checked against the array.
 * Any differences that cannot be handled will cause an error.
 * Normally, only one change can be managed at a time.
 */
static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
{
	int rv = 0;
	int cnt = 0;
	int state = 0;

	/* calculate expected state, ignoring low bits */
	if (mddev->bitmap && mddev->bitmap_info.offset)
		state |= (1 << MD_SB_BITMAP_PRESENT);

	if (mddev->major_version != info->major_version ||
	    mddev->minor_version != info->minor_version ||
/* mddev->patch_version != info->patch_version || */
	    mddev->ctime != info->ctime ||
	    mddev->level != info->level ||
/* mddev->layout != info->layout || */
	    !mddev->persistent != info->not_persistent ||
	    mddev->chunk_sectors != info->chunk_size >> 9 ||
	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
	    ((state ^ info->state) & 0xfffffe00))
		return -EINVAL;
	/* Check there is only one change */
	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
		cnt++;
	if (mddev->raid_disks != info->raid_disks)
		cnt++;
	if (mddev->layout != info->layout)
		cnt++;
	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
		cnt++;
	if (cnt == 0)
		return 0;
	if (cnt > 1)
		return -EINVAL;

	if (mddev->layout != info->layout) {
		/* Change layout
		 * we don't need to do anything at the md level, the
		 * personality will take care of it all.
		 */
		if (mddev->pers->check_reshape == NULL)
			return -EINVAL;
		else {
			mddev->new_layout = info->layout;
			rv = mddev->pers->check_reshape(mddev);
			if (rv)
				mddev->new_layout = mddev->layout;
			return rv;
		}
	}
	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
		rv = update_size(mddev, (sector_t)info->size * 2);

	if (mddev->raid_disks != info->raid_disks)
		rv = update_raid_disks(mddev, info->raid_disks);

	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
		if (mddev->pers->quiesce == NULL)
			return -EINVAL;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
			/* add the bitmap */
			if (mddev->bitmap)
				return -EEXIST;
			if (mddev->bitmap_info.default_offset == 0)
				return -EINVAL;
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
			mddev->bitmap_info.space =
				mddev->bitmap_info.default_space;
			mddev->pers->quiesce(mddev, 1);
			rv = bitmap_create(mddev);
			if (!rv)
				rv = bitmap_load(mddev);
			if (rv)
				bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		} else {
			/* remove the bitmap */
			if (!mddev->bitmap)
				return -ENOENT;
			if (mddev->bitmap->storage.file)
				return -EINVAL;
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
			mddev->bitmap_info.offset = 0;
		}
	}
	md_update_sb(mddev, 1);
	return rv;
}
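/*
 * Illustrative sketch (not part of the driver): update_array_info()
 * insists that the submitted mdu_array_info_t differs from the live
 * array in at most one of size / raid_disks / layout / bitmap-present,
 * so a caller typically reads the current info back and edits a single
 * field.  Hypothetical fragment; mdadm --grow follows the same
 * read-modify-write pattern.  Compiled out of the kernel build.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>	/* GET_ARRAY_INFO, SET_ARRAY_INFO */

static int grow_to_disks(int fd, int new_raid_disks)
{
	mdu_array_info_t info;

	if (ioctl(fd, GET_ARRAY_INFO, &info) < 0)
		return -1;
	info.raid_disks = new_raid_disks;	/* exactly one change */
	return ioctl(fd, SET_ARRAY_INFO, &info);
}
#endif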
static int set_disk_faulty(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;
	int err = 0;

	if (mddev->pers == NULL)
		return -ENODEV;

	rcu_read_lock();
	rdev = find_rdev_rcu(mddev, dev);
	if (!rdev)
		err = -ENODEV;
	else {
		md_error(mddev, rdev);
		if (!test_bit(Faulty, &rdev->flags))
			err = -EBUSY;
	}
	rcu_read_unlock();
	return err;
}
/*
 * We have a problem here : there is no easy way to give a CHS
 * virtual geometry. We currently pretend that we have a 2 heads
 * 4 sectors (with a BIG number of cylinders...). This drives
 * dosfs just mad... ;-)
 */
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mddev *mddev = bdev->bd_disk->private_data;

	geo->heads = 2;
	geo->sectors = 4;
	geo->cylinders = mddev->array_sectors / 8;
	return 0;
}
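/*
 * Worked example (illustrative): for a 1 GiB array, array_sectors is
 * 2097152, so md_getgeo() reports cylinders = 2097152 / 8 = 262144.
 * The fake geometry is self-consistent: heads * sectors * cylinders =
 * 2 * 4 * 262144 = 2097152 sectors again.
 */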
static int md_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;
	struct mddev *mddev = NULL;
	int ro;

	switch (cmd) {
	case RAID_VERSION:
	case GET_ARRAY_INFO:
	case GET_DISK_INFO:
		break;
	default:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	/*
	 * Commands dealing with the RAID driver but not any
	 * particular array:
	 */
	switch (cmd) {
	case RAID_VERSION:
		err = get_version(argp);
		goto done;

	case PRINT_RAID_DEBUG:
		err = 0;
		md_print_devices();
		goto done;

#ifndef MODULE
	case RAID_AUTORUN:
		err = 0;
		autostart_arrays(arg);
		goto done;
#endif
	default:;
	}

	/*
	 * Commands creating/starting a new array:
	 */

	mddev = bdev->bd_disk->private_data;

	if (!mddev) {
		BUG();
		goto abort;
	}

	/* Some actions do not require the mutex */
	switch (cmd) {
	case GET_ARRAY_INFO:
		if (!mddev->raid_disks && !mddev->external)
			err = -ENODEV;
		else
			err = get_array_info(mddev, argp);
		goto abort;

	case GET_DISK_INFO:
		if (!mddev->raid_disks && !mddev->external)
			err = -ENODEV;
		else
			err = get_disk_info(mddev, argp);
		goto abort;

	case SET_DISK_FAULTY:
		err = set_disk_faulty(mddev, new_decode_dev(arg));
		goto abort;
	}

	if (cmd == ADD_NEW_DISK)
		/* need to ensure md_delayed_delete() has completed */
		flush_workqueue(md_misc_wq);

	if (cmd == HOT_REMOVE_DISK)
		/* need to ensure recovery thread has run */
		wait_event_interruptible_timeout(mddev->sb_wait,
						 !test_bit(MD_RECOVERY_NEEDED,
							   &mddev->flags),
						 msecs_to_jiffies(5000));
	if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
		/* Need to flush page cache, and ensure no-one else opens
		 * and writes
		 */
		mutex_lock(&mddev->open_mutex);
		if (atomic_read(&mddev->openers) > 1) {
			mutex_unlock(&mddev->open_mutex);
			err = -EBUSY;
			goto abort;
		}
		set_bit(MD_STILL_CLOSED, &mddev->flags);
		mutex_unlock(&mddev->open_mutex);
		sync_blockdev(bdev);
	}
	err = mddev_lock(mddev);
	if (err) {
		printk(KERN_INFO
			"md: ioctl lock interrupted, reason %d, cmd %d\n",
			err, cmd);
		goto abort;
	}

	if (cmd == SET_ARRAY_INFO) {
		mdu_array_info_t info;
		if (!arg)
			memset(&info, 0, sizeof(info));
		else if (copy_from_user(&info, argp, sizeof(info))) {
			err = -EFAULT;
			goto abort_unlock;
		}
		if (mddev->pers) {
			err = update_array_info(mddev, &info);
			if (err) {
				printk(KERN_WARNING "md: couldn't update"
				       " array info. %d\n", err);
				goto abort_unlock;
			}
			goto done_unlock;
		}
		if (!list_empty(&mddev->disks)) {
			printk(KERN_WARNING
			       "md: array %s already has disks!\n",
			       mdname(mddev));
			err = -EBUSY;
			goto abort_unlock;
		}
		if (mddev->raid_disks) {
			printk(KERN_WARNING
			       "md: array %s already initialised!\n",
			       mdname(mddev));
			err = -EBUSY;
			goto abort_unlock;
		}
		err = set_array_info(mddev, &info);
		if (err) {
			printk(KERN_WARNING "md: couldn't set"
			       " array info. %d\n", err);
			goto abort_unlock;
		}
		goto done_unlock;
	}

	/*
	 * Commands querying/configuring an existing array:
	 */
	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
	 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
	if ((!mddev->raid_disks && !mddev->external)
	    && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
	    && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
	    && cmd != GET_BITMAP_FILE) {
		err = -ENODEV;
		goto abort_unlock;
	}

	/*
	 * Commands even a read-only array can execute:
	 */
	switch (cmd) {
	case GET_BITMAP_FILE:
		err = get_bitmap_file(mddev, argp);
		goto done_unlock;

	case RESTART_ARRAY_RW:
		err = restart_array(mddev);
		goto done_unlock;

	case STOP_ARRAY:
		err = do_md_stop(mddev, 0, bdev);
		goto done_unlock;

	case STOP_ARRAY_RO:
		err = md_set_readonly(mddev, bdev);
		goto done_unlock;

	case HOT_REMOVE_DISK:
		err = hot_remove_disk(mddev, new_decode_dev(arg));
		goto done_unlock;

	case ADD_NEW_DISK:
		/* We can support ADD_NEW_DISK on read-only arrays
		 * only if we are re-adding a preexisting device.
		 * So require mddev->pers and MD_DISK_SYNC.
		 */
		if (mddev->pers) {
			mdu_disk_info_t info;
			if (copy_from_user(&info, argp, sizeof(info)))
				err = -EFAULT;
			else if (!(info.state & (1<<MD_DISK_SYNC)))
				/* Need to clear read-only for this */
				break;
			else
				err = add_new_disk(mddev, &info);
			goto done_unlock;
		}
		break;

	case BLKROSET:
		if (get_user(ro, (int __user *)(arg))) {
			err = -EFAULT;
			goto done_unlock;
		}
		err = -EINVAL;

		/* if the bdev is going readonly the value of mddev->ro
		 * does not matter, no writes are coming
		 */
		if (ro)
			goto done_unlock;

		/* are we already prepared for writes? */
		if (mddev->ro != 1)
			goto done_unlock;

		/* transitioning to read-auto need only happen for
		 * arrays that call md_write_start
		 */
		if (mddev->pers) {
			err = restart_array(mddev);
			if (err == 0) {
				mddev->ro = 2;
				set_disk_ro(mddev->gendisk, 0);
			}
		}
		goto done_unlock;
	}

	/*
	 * The remaining ioctls are changing the state of the
	 * superblock, so we do not allow them on read-only arrays.
	 * However non-MD ioctls (e.g. get-size) will still come through
	 * here and hit the 'default' below, so only disallow
	 * 'md' ioctls, and switch to rw mode if started auto-readonly.
	 */
	if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
		if (mddev->ro == 2) {
			mddev->ro = 0;
			sysfs_notify_dirent_safe(mddev->sysfs_state);
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			/* mddev_unlock will wake thread */
			/* If a device failed while we were read-only, we
			 * need to make sure the metadata is updated now.
			 */
			if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
				mddev_unlock(mddev);
				wait_event(mddev->sb_wait,
					   !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
					   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
				mddev_lock(mddev);
			}
		} else {
			err = -EROFS;
			goto abort_unlock;
		}
	}

	switch (cmd) {
	case ADD_NEW_DISK:
	{
		mdu_disk_info_t info;
		if (copy_from_user(&info, argp, sizeof(info)))
			err = -EFAULT;
		else
			err = add_new_disk(mddev, &info);
		goto done_unlock;
	}

	case HOT_ADD_DISK:
		err = hot_add_disk(mddev, new_decode_dev(arg));
		goto done_unlock;

	case RUN_ARRAY:
		err = do_md_run(mddev);
		goto done_unlock;

	case SET_BITMAP_FILE:
		err = set_bitmap_file(mddev, (int)arg);
		goto done_unlock;

	default:
		err = -EINVAL;
		goto abort_unlock;
	}

done_unlock:
abort_unlock:
	if (mddev->hold_active == UNTIL_IOCTL &&
	    err != -EINVAL)
		mddev->hold_active = 0;
	mddev_unlock(mddev);

	return err;
done:
	if (err)
		MD_BUG();
abort:
	return err;
}
#ifdef CONFIG_COMPAT
static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case HOT_REMOVE_DISK:
	case HOT_ADD_DISK:
	case SET_DISK_FAULTY:
	case SET_BITMAP_FILE:
		/* These take an integer arg, do not convert */
		break;
	default:
		arg = (unsigned long)compat_ptr(arg);
		break;
	}

	return md_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */
static int md_open(struct block_device *bdev, fmode_t mode)
{
	/*
	 * Succeed if we can lock the mddev, which confirms that
	 * it isn't being stopped right now.
	 */
	struct mddev *mddev = mddev_find(bdev->bd_dev);
	int err;

	if (!mddev)
		return -ENODEV;

	if (mddev->gendisk != bdev->bd_disk) {
		/* we are racing with mddev_put which is discarding this
		 * bd_disk.
		 */
		mddev_put(mddev);
		/* Wait until bdev->bd_disk is definitely gone */
		flush_workqueue(md_misc_wq);
		/* Then retry the open from the top */
		return -ERESTARTSYS;
	}
	BUG_ON(mddev != bdev->bd_disk->private_data);

	if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
		goto out;

	err = 0;
	atomic_inc(&mddev->openers);
	clear_bit(MD_STILL_CLOSED, &mddev->flags);
	mutex_unlock(&mddev->open_mutex);

	check_disk_change(bdev);
 out:
	return err;
}
static void md_release(struct gendisk *disk, fmode_t mode)
{
	struct mddev *mddev = disk->private_data;

	BUG_ON(!mddev);
	atomic_dec(&mddev->openers);
	mddev_put(mddev);
}

static int md_media_changed(struct gendisk *disk)
{
	struct mddev *mddev = disk->private_data;

	return mddev->changed;
}

static int md_revalidate(struct gendisk *disk)
{
	struct mddev *mddev = disk->private_data;

	mddev->changed = 0;
	return 0;
}

static const struct block_device_operations md_fops =
{
	.owner		= THIS_MODULE,
	.open		= md_open,
	.release	= md_release,
	.ioctl		= md_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= md_compat_ioctl,
#endif
	.getgeo		= md_getgeo,
	.media_changed	= md_media_changed,
	.revalidate_disk= md_revalidate,
};
static int md_thread(void *arg)
{
	struct md_thread *thread = arg;

	/*
	 * md_thread is a 'system-thread', its priority should be very
	 * high. We avoid resource deadlocks individually in each
	 * raid personality. (RAID5 does preallocation) We also use RR and
	 * the very same RT priority as kswapd, thus we will never get
	 * into a priority inversion deadlock.
	 *
	 * we definitely have to have equal or higher priority than
	 * bdflush, otherwise bdflush will deadlock if there are too
	 * many dirty RAID5 blocks.
	 */

	allow_signal(SIGKILL);
	while (!kthread_should_stop()) {

		/* We need to wait INTERRUPTIBLE so that
		 * we don't add to the load-average.
		 * That means we need to be sure no signals are
		 * pending
		 */
		if (signal_pending(current))
			flush_signals(current);

		wait_event_interruptible_timeout
			(thread->wqueue,
			 test_bit(THREAD_WAKEUP, &thread->flags)
			 || kthread_should_stop(),
			 thread->timeout);

		clear_bit(THREAD_WAKEUP, &thread->flags);
		if (!kthread_should_stop())
			thread->run(thread);
	}

	return 0;
}
void md_wakeup_thread(struct md_thread *thread)
{
	if (thread) {
		pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
		set_bit(THREAD_WAKEUP, &thread->flags);
		wake_up(&thread->wqueue);
	}
}

struct md_thread *md_register_thread(void (*run) (struct md_thread *),
		struct mddev *mddev, const char *name)
{
	struct md_thread *thread;

	thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
	if (!thread)
		return NULL;

	init_waitqueue_head(&thread->wqueue);

	thread->run = run;
	thread->mddev = mddev;
	thread->timeout = MAX_SCHEDULE_TIMEOUT;
	thread->tsk = kthread_run(md_thread, thread,
				  "%s_%s",
				  mdname(thread->mddev),
				  name);
	if (IS_ERR(thread->tsk)) {
		kfree(thread);
		return NULL;
	}
	return thread;
}
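/*
 * Illustrative sketch (not part of the driver): the registration
 * pattern a personality typically follows.  raid1, for example, starts
 * its worker with md_register_thread(raid1d, mddev, "raid1") from its
 * ->run() method and later tears it down with md_unregister_thread().
 * The names below are hypothetical.  Compiled out of the kernel build.
 */
#if 0
static void exampled(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	/* ... service work queued against this array ... */
}

static int example_run(struct mddev *mddev)
{
	/* kthread is named "<mdname>_example", woken via md_wakeup_thread */
	mddev->thread = md_register_thread(exampled, mddev, "example");
	if (!mddev->thread)
		return -ENOMEM;
	return 0;
}
#endif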
void md_unregister_thread(struct md_thread **threadp)
{
	struct md_thread *thread = *threadp;

	if (!thread)
		return;
	pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
	/* Locking ensures that mddev_unlock does not wake_up a
	 * non-existent thread
	 */
	spin_lock(&pers_lock);
	*threadp = NULL;
	spin_unlock(&pers_lock);

	kthread_stop(thread->tsk);
	kfree(thread);
}
void md_error(struct mddev *mddev, struct md_rdev *rdev)
{
	if (!mddev) {
		MD_BUG();
		return;
	}

	if (!rdev || test_bit(Faulty, &rdev->flags))
		return;

	if (!mddev->pers || !mddev->pers->error_handler)
		return;
	mddev->pers->error_handler(mddev, rdev);
	if (mddev->degraded)
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	if (mddev->event_work.func)
		queue_work(md_misc_wq, &mddev->event_work);
	md_new_event_inintr(mddev);
}
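/*
 * Illustrative sketch (not part of the driver): personalities call
 * md_error() from their I/O completion paths when a member device
 * fails; raid1's write completion does essentially this.  Hypothetical
 * fragment, shown only to make the calling convention concrete.
 */
#if 0
static void example_end_write(struct bio *bio, int error)
{
	struct md_rdev *rdev = bio->bi_private;

	if (error)
		/* mark the member Faulty and let recovery take over */
		md_error(rdev->mddev, rdev);
}
#endif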
/* seq_file implementation for /proc/mdstat */

static void status_unused(struct seq_file *seq)
{
	int i = 0;
	struct md_rdev *rdev;

	seq_printf(seq, "unused devices: ");

	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
		char b[BDEVNAME_SIZE];
		i++;
		seq_printf(seq, "%s ",
			   bdevname(rdev->bdev, b));
	}
	if (!i)
		seq_printf(seq, "<none>");

	seq_printf(seq, "\n");
}
static void status_resync(struct seq_file *seq, struct mddev *mddev)
{
	sector_t max_sectors, resync, res;
	unsigned long dt, db;
	sector_t rt;
	int scale;
	unsigned int per_milli;

	if (mddev->curr_resync <= 3)
		resync = 0;
	else
		resync = mddev->curr_resync
			- atomic_read(&mddev->recovery_active);

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	/*
	 * Should not happen.
	 */
	if (!max_sectors) {
		MD_BUG();
		return;
	}
	/* Pick 'scale' such that (resync>>scale)*1000 will fit
	 * in a sector_t, and (max_sectors>>scale) will fit in a
	 * u32, as those are the requirements for sector_div.
	 * Thus 'scale' must be at least 10
	 */
	scale = 10;
	if (sizeof(sector_t) > sizeof(unsigned long)) {
		while (max_sectors/2 > (1ULL<<(scale+32)))
			scale++;
	}
	res = (resync>>scale)*1000;
	sector_div(res, (u32)((max_sectors>>scale)+1));

	per_milli = res;
	{
		int i, x = per_milli/50, y = 20-x;
		seq_printf(seq, "[");
		for (i = 0; i < x; i++)
			seq_printf(seq, "=");
		seq_printf(seq, ">");
		for (i = 0; i < y; i++)
			seq_printf(seq, ".");
		seq_printf(seq, "] ");
	}
	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
		    "reshape" :
		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
		     "check" :
		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
		      "resync" : "recovery"))),
		   per_milli/10, per_milli % 10,
		   (unsigned long long) resync/2,
		   (unsigned long long) max_sectors/2);

	/*
	 * dt: time from mark until now
	 * db: blocks written from mark until now
	 * rt: remaining time
	 *
	 * rt is a sector_t, so could be 32bit or 64bit.
	 * So we divide before multiply in case it is 32bit and close
	 * to the limit.
	 * We scale the divisor (db) by 32 to avoid losing precision
	 * near the end of resync when the number of remaining sectors
	 * is close to 'db'.
	 * We then divide rt by 32 after multiplying by db to compensate.
	 * The '+1' avoids division by zero if db is very small.
	 */
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt) dt++;
	db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
		- mddev->resync_mark_cnt;
	rt = max_sectors - resync;    /* number of remaining sectors */
	sector_div(rt, db/32+1);
	rt *= dt;
	rt >>= 5;

	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
		   ((unsigned long)rt % 60)/6);

	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}
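/*
 * Worked example (illustrative): suppose 1000000 sectors remain,
 * dt = 10 s and db = 20000 sectors were synced in that window.  Then
 * rt = 1000000 / (20000/32 + 1) = 1000000 / 626 = 1597, rt *= 10 gives
 * 15970, and rt >>= 5 yields 499 seconds -- close to the exact
 * 1000000 / (20000/10) = 500 s, without the multiply ever overflowing
 * a 32-bit sector_t mid-calculation.
 */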
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct list_head *tmp;
	loff_t l = *pos;
	struct mddev *mddev;

	if (l >= 0x10000)
		return NULL;
	if (!l--)
		/* header */
		return (void*)1;

	spin_lock(&all_mddevs_lock);
	list_for_each(tmp, &all_mddevs)
		if (!l--) {
			mddev = list_entry(tmp, struct mddev, all_mddevs);
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			return mddev;
		}
	spin_unlock(&all_mddevs_lock);
	if (!l--)
		return (void*)2;/* tail */
	return NULL;
}

static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *tmp;
	struct mddev *next_mddev, *mddev = v;

	++*pos;
	if (v == (void*)2)
		return NULL;

	spin_lock(&all_mddevs_lock);
	if (v == (void*)1)
		tmp = all_mddevs.next;
	else
		tmp = mddev->all_mddevs.next;
	if (tmp != &all_mddevs)
		next_mddev = mddev_get(list_entry(tmp, struct mddev, all_mddevs));
	else {
		next_mddev = (void*)2;
		*pos = 0x10000;
	}
	spin_unlock(&all_mddevs_lock);

	if (v != (void*)1)
		mddev_put(mddev);
	return next_mddev;
}

static void md_seq_stop(struct seq_file *seq, void *v)
{
	struct mddev *mddev = v;

	if (mddev && v != (void*)1 && v != (void*)2)
		mddev_put(mddev);
}
static int md_seq_show(struct seq_file *seq, void *v)
{
	struct mddev *mddev = v;
	sector_t sectors;
	struct md_rdev *rdev;

	if (v == (void*)1) {
		struct md_personality *pers;
		seq_printf(seq, "Personalities : ");
		spin_lock(&pers_lock);
		list_for_each_entry(pers, &pers_list, list)
			seq_printf(seq, "[%s] ", pers->name);

		spin_unlock(&pers_lock);
		seq_printf(seq, "\n");
		seq->poll_event = atomic_read(&md_event_count);
		return 0;
	}
	if (v == (void*)2) {
		status_unused(seq);
		return 0;
	}

	if (mddev_lock(mddev) < 0)
		return -EINTR;

	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
		seq_printf(seq, "%s : %sactive", mdname(mddev),
			   mddev->pers ? "" : "in");
		if (mddev->pers) {
			if (mddev->ro == 1)
				seq_printf(seq, " (read-only)");
			if (mddev->ro == 2)
				seq_printf(seq, " (auto-read-only)");
			seq_printf(seq, " %s", mddev->pers->name);
		}

		sectors = 0;
		rdev_for_each(rdev, mddev) {
			char b[BDEVNAME_SIZE];
			seq_printf(seq, " %s[%d]",
				   bdevname(rdev->bdev, b), rdev->desc_nr);
			if (test_bit(WriteMostly, &rdev->flags))
				seq_printf(seq, "(W)");
			if (test_bit(Faulty, &rdev->flags)) {
				seq_printf(seq, "(F)");
				continue;
			}
			if (rdev->raid_disk < 0)
				seq_printf(seq, "(S)"); /* spare */
			if (test_bit(Replacement, &rdev->flags))
				seq_printf(seq, "(R)");
			sectors += rdev->sectors;
		}

		if (!list_empty(&mddev->disks)) {
			if (mddev->pers)
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)
					   mddev->array_sectors / 2);
			else
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)sectors / 2);
		}
		if (mddev->persistent) {
			if (mddev->major_version != 0 ||
			    mddev->minor_version != 90) {
				seq_printf(seq, " super %d.%d",
					   mddev->major_version,
					   mddev->minor_version);
			}
		} else if (mddev->external)
			seq_printf(seq, " super external:%s",
				   mddev->metadata_type);
		else
			seq_printf(seq, " super non-persistent");

		if (mddev->pers) {
			mddev->pers->status(seq, mddev);
			seq_printf(seq, "\n      ");
			if (mddev->pers->sync_request) {
				if (mddev->curr_resync > 2) {
					status_resync(seq, mddev);
					seq_printf(seq, "\n      ");
				} else if (mddev->curr_resync >= 1)
					seq_printf(seq, "\tresync=DELAYED\n      ");
				else if (mddev->recovery_cp < MaxSector)
					seq_printf(seq, "\tresync=PENDING\n      ");
			}
		} else
			seq_printf(seq, "\n      ");

		bitmap_status(seq, mddev->bitmap);

		seq_printf(seq, "\n");
	}
	mddev_unlock(mddev);

	return 0;
}

static const struct seq_operations md_seq_ops = {
	.start  = md_seq_start,
	.next   = md_seq_next,
	.stop   = md_seq_stop,
	.show   = md_seq_show,
};
static int md_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int error;

	error = seq_open(file, &md_seq_ops);
	if (error)
		return error;

	seq = file->private_data;
	seq->poll_event = atomic_read(&md_event_count);
	return error;
}

static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
	struct seq_file *seq = filp->private_data;
	int mask;

	poll_wait(filp, &md_event_waiters, wait);

	/* always allow read */
	mask = POLLIN | POLLRDNORM;

	if (seq->poll_event != atomic_read(&md_event_count))
		mask |= POLLERR | POLLPRI;
	return mask;
}

static const struct file_operations md_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= md_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
	.poll		= mdstat_poll,
};
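/*
 * Illustrative sketch (not part of the driver): because mdstat_poll()
 * raises POLLERR|POLLPRI whenever md_event_count moves, a monitor can
 * sleep on /proc/mdstat instead of re-reading it on a timer.  Minimal,
 * hedged userspace example of that pattern (mdadm --monitor uses the
 * same mechanism).  Compiled out of the kernel build.
 */
#if 0
#include <poll.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct pollfd pfd;

	pfd.fd = open("/proc/mdstat", O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLERR | POLLPRI;
	for (;;) {
		/* consume current contents, then wait for an event */
		while (read(pfd.fd, buf, sizeof(buf)) > 0)
			;
		poll(&pfd, 1, -1);
		printf("md event\n");
		lseek(pfd.fd, 0, SEEK_SET);	/* re-read from the top */
	}
}
#endif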
int register_md_personality(struct md_personality *p)
{
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
	spin_unlock(&pers_lock);
	return 0;
}

int unregister_md_personality(struct md_personality *p)
{
	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}
static int is_mddev_idle(struct mddev *mddev, int init)
{
	struct md_rdev *rdev;
	int idle;
	int curr_events;

	idle = 1;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
			      (int)part_stat_read(&disk->part0, sectors[1]) -
			      atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_event to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there is little or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 *
		 */
		if (init || curr_events - rdev->last_events > 64) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	rcu_read_unlock();
	return idle;
}
void md_done_sync(struct mddev *mddev, int blocks, int ok)
{
	/* another "blocks" (512byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}
/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(struct mddev *mddev, struct bio *bi)
{
	int did_change = 0;

	if (bio_data_dir(bi) != WRITE)
		return;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		did_change = 1;
	}
	atomic_inc(&mddev->writes_pending);
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	if (mddev->in_sync) {
		spin_lock_irq(&mddev->write_lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			set_bit(MD_CHANGE_PENDING, &mddev->flags);
			md_wakeup_thread(mddev->thread);
			did_change = 1;
		}
		spin_unlock_irq(&mddev->write_lock);
	}
	if (did_change)
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	wait_event(mddev->sb_wait,
		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
void md_write_end(struct mddev *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else if (mddev->safemode_delay)
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
	}
}
/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 *
 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
 * is dropped, so return -EAGAIN after notifying userspace.
 */
int md_allow_write(struct mddev *mddev)
{
	if (!mddev->pers)
		return 0;
	if (mddev->ro)
		return 0;
	if (!mddev->pers->sync_request)
		return 0;

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync) {
		mddev->in_sync = 0;
		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		set_bit(MD_CHANGE_PENDING, &mddev->flags);
		if (mddev->safemode_delay &&
		    mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock_irq(&mddev->write_lock);
		md_update_sb(mddev, 0);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	} else
		spin_unlock_irq(&mddev->write_lock);

	if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
		return -EAGAIN;
	else
		return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);
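/*
 * Illustrative sketch (not part of the driver): the intended calling
 * pattern for md_allow_write().  raid5's stripe-cache resize does
 * essentially this before a GFP_KERNEL allocation made while holding
 * the mddev lock; the helper name below is hypothetical.  Compiled out
 * of the kernel build.
 */
#if 0
static int example_alloc_under_lock(struct mddev *mddev, size_t size)
{
	void *buf;
	int err = md_allow_write(mddev);

	if (err)
		return err;	/* -EAGAIN: caller retries after unlock */
	buf = kmalloc(size, GFP_KERNEL);	/* safe: array marked active */
	if (!buf)
		return -ENOMEM;
	kfree(buf);
	return 0;
}
#endif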
#define SYNC_MARKS	10
#define SYNC_MARK_STEP	(3*HZ)
#define UPDATE_FREQUENCY (5*60*HZ)
void md_do_sync(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct mddev *mddev2;
	unsigned int currspeed = 0,
		     window;
	sector_t max_sectors, j, io_sectors;
	unsigned long mark[SYNC_MARKS];
	unsigned long update_time;
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark, m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;
	struct md_rdev *rdev;
	char *desc, *action = NULL;
	struct blk_plug plug;

	/* just in case thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;
	if (mddev->ro) /* never try to sync a read-only array */
		return;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
			desc = "data-check";
			action = "check";
		} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
			desc = "requested-resync";
			action = "repair";
		} else
			desc = "resync";
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		desc = "reshape";
	else
		desc = "recovery";

	mddev->last_sync_action = action ?: desc;

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 *
	 */

	do {
		mddev->curr_resync = 2;

	try_again:
		if (kthread_should_stop())
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			goto skip;
		for_each_mddev(mddev2, tmp) {
			if (mddev2 == mddev)
				continue;
			if (!mddev->parallel_resync
			&&  mddev2->curr_resync
			&&  match_mddev_units(mddev, mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				/* We need to wait 'interruptible' so as not to
				 * contribute to the load average, and not to
				 * be caught by 'softlockup'
				 */
				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
				if (!kthread_should_stop() &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					printk(KERN_INFO "md: delaying %s of %s"
					       " until %s has finished (they"
					       " share one or more physical units)\n",
					       desc, mdname(mddev), mdname(mddev2));
					mddev_put(mddev2);
					if (signal_pending(current))
						flush_signals(current);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	j = 0;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		atomic64_set(&mddev->resync_mismatches, 0);
		/* we don't use the checkpoint if there's a bitmap */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->resync_min;
		else if (!mddev->bitmap)
			j = mddev->recovery_cp;

	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else {
		/* recovery follows the physical size of devices */
		max_sectors = mddev->dev_sectors;
		j = MaxSector;
		rcu_read_lock();
		rdev_for_each_rcu(rdev, mddev)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;
		rcu_read_unlock();
	}

	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_ speed:"
	       " %d KB/sec/disk.\n", speed_min(mddev));
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for %s.\n",
	       speed_max(mddev), desc);

	is_mddev_idle(mddev, 1); /* this initializes IO event counters */

	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
	       window/2, (unsigned long long)max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	last_check = 0;

	if (j > 2) {
		printk(KERN_INFO
		       "md: resuming %s of %s from checkpoint.\n",
		       desc, mdname(mddev));
		mddev->curr_resync = j;
	} else
		mddev->curr_resync = 3; /* no longer delayed */
	mddev->curr_resync_completed = j;
	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	md_new_event(mddev);
	update_time = jiffies;

	blk_start_plug(&plug);
	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;

		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
		    ((mddev->curr_resync > mddev->curr_resync_completed &&
		      (mddev->curr_resync - mddev->curr_resync_completed)
		      > (max_sectors >> 4)) ||
		     time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
		     (j - mddev->curr_resync_completed)*2
		     >= mddev->resync_max - mddev->curr_resync_completed
			    )) {
			/* time to update curr_resync_completed */
			wait_event(mddev->recovery_wait,
				   atomic_read(&mddev->recovery_active) == 0);
			mddev->curr_resync_completed = j;
			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
			    j > mddev->recovery_cp)
				mddev->recovery_cp = j;
			update_time = jiffies;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
		}

		while (j >= mddev->resync_max && !kthread_should_stop()) {
			/* As this condition is controlled by user-space,
			 * we can block indefinitely, so use '_interruptible'
			 * to avoid triggering warnings.
			 */
			flush_signals(current); /* just in case */
			wait_event_interruptible(mddev->recovery_wait,
						 mddev->resync_max > j
						 || kthread_should_stop());
		}

		if (kthread_should_stop())
			goto interrupted;

		sectors = mddev->pers->sync_request(mddev, j, &skipped,
						    currspeed < speed_min(mddev));
		if (sectors == 0) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

		j += sectors;
		if (j > 2)
			mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;
	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP)) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}

		if (kthread_should_stop())
			goto interrupted;

		/*
		 * this loop exits only when we are slower than
		 * the 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
			    !is_mddev_idle(mddev, 0)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: %s done.\n", mdname(mddev), desc);
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	blk_finish_plug(&plug);
	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 2) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {
					printk(KERN_INFO
					       "md: checkpointing %s of %s.\n",
					       desc, mdname(mddev));
					if (test_bit(MD_RECOVERY_ERROR,
						     &mddev->recovery))
						mddev->recovery_cp =
							mddev->curr_resync_completed;
					else
						mddev->recovery_cp =
							mddev->curr_resync;
				}
			} else
				mddev->recovery_cp = MaxSector;
		} else {
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			rcu_read_lock();
			rdev_for_each_rcu(rdev, mddev)
				if (rdev->raid_disk >= 0 &&
				    mddev->delta_disks >= 0 &&
				    !test_bit(Faulty, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags) &&
				    rdev->recovery_offset < mddev->curr_resync)
					rdev->recovery_offset = mddev->curr_resync;
			rcu_read_unlock();
		}
	}
 skip:
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
		/* We completed so min/max setting can be forgotten if used. */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			mddev->resync_min = 0;
		mddev->resync_max = MaxSector;
	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		mddev->resync_min = mddev->curr_resync_completed;
	mddev->curr_resync = 0;
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return;

 interrupted:
	/*
	 * got a signal, exit.
	 */
	printk(KERN_INFO
	       "md: md_do_sync() got signal ... exiting\n");
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	goto out;
}
EXPORT_SYMBOL_GPL(md_do_sync);
static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this)
{
	struct md_rdev *rdev;
	int spares = 0;
	int removed = 0;

	rdev_for_each(rdev, mddev)
		if ((this == NULL || rdev == this) &&
		    rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    (test_bit(Faulty, &rdev->flags) ||
		     !test_bit(In_sync, &rdev->flags)) &&
		    atomic_read(&rdev->nr_pending) == 0) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev) == 0) {
				sysfs_unlink_rdev(mddev, rdev);
				rdev->raid_disk = -1;
				removed++;
			}
		}
	if (removed && mddev->kobj.sd)
		sysfs_notify(&mddev->kobj, NULL, "degraded");

	if (this)
		goto no_add;

	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			spares++;
		if (rdev->raid_disk >= 0)
			continue;
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (mddev->ro &&
		    rdev->saved_raid_disk < 0)
			continue;

		rdev->recovery_offset = 0;
		if (mddev->pers->
		    hot_add_disk(mddev, rdev) == 0) {
			if (sysfs_link_rdev(mddev, rdev))
				/* failure here is OK */;
			spares++;
			md_new_event(mddev);
			set_bit(MD_CHANGE_DEVS, &mddev->flags);
		}
	}
no_add:
	if (removed)
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
	return spares;
}
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(struct mddev *mddev)
{
	if (mddev->suspended)
		return;

	if (mddev->bitmap)
		bitmap_daemon_work(mddev);

	if (signal_pending(current)) {
		if (mddev->pers->sync_request && !mddev->external) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return;
	if ( ! (
		(mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;

		if (mddev->ro) {
			/* On a read-only array we can:
			 * - remove failed devices
			 * - add already-in_sync devices if the array itself
			 *   is in-sync.
			 * As we only add devices that are already in-sync,
			 * we can activate the spares immediately.
			 */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			remove_and_add_spares(mddev, NULL);
			mddev->pers->spare_active(mddev);
			goto unlock;
		}

		if (!mddev->external) {
			int did_change = 0;
			spin_lock_irq(&mddev->write_lock);
			if (mddev->safemode &&
			    !atomic_read(&mddev->writes_pending) &&
			    !mddev->in_sync &&
			    mddev->recovery_cp == MaxSector) {
				mddev->in_sync = 1;
				did_change = 1;
				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			}
			if (mddev->safemode == 1)
				mddev->safemode = 0;
			spin_unlock_irq(&mddev->write_lock);
			if (did_change)
				sysfs_notify_dirent_safe(mddev->sysfs_state);
		}

		if (mddev->flags & MD_UPDATE_SB_FLAGS)
			md_update_sb(mddev, 0);

		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			md_reap_sync_thread(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
		 */
		mddev->curr_resync_completed = 0;
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto unlock;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape == NULL ||
			    mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto unlock;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev, NULL))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto unlock;

		if (mddev->pers->sync_request) {
			if (spares) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
				       " thread...\n",
				       mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
				clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
				clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
				clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
				clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			} else
				md_wakeup_thread(mddev->sync_thread);
			sysfs_notify_dirent_safe(mddev->sysfs_action);
			md_new_event(mddev);
		}
	unlock:
		wake_up(&mddev->sb_wait);

		if (!mddev->sync_thread) {
			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
					       &mddev->recovery))
				if (mddev->sysfs_action)
					sysfs_notify_dirent_safe(mddev->sysfs_action);
		}
		mddev_unlock(mddev);
	}
}
void md_reap_sync_thread(struct mddev *mddev)
{
	struct md_rdev *rdev;

	/* resync has finished, collect result */
	md_unregister_thread(&mddev->sync_thread);
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* success...*/
		/* activate any spares */
		if (mddev->pers->spare_active(mddev)) {
			sysfs_notify(&mddev->kobj, NULL,
				     "degraded");
			set_bit(MD_CHANGE_DEVS, &mddev->flags);
		}
	}
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    mddev->pers->finish_reshape)
		mddev->pers->finish_reshape(mddev);

	/* If array is no-longer degraded, then any saved_raid_disk
	 * information must be scrapped.  Also if any device is now
	 * In_sync we must scrap the saved_raid_disk for that device
	 * so that the superblock for an incrementally recovered device
	 * gets written out.
	 */
	rdev_for_each(rdev, mddev)
		if (!mddev->degraded ||
		    test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = -1;

	md_update_sb(mddev, 1);
	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	/* flag recovery needed just to double check */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	md_new_event(mddev);
	if (mddev->event_work.func)
		queue_work(md_misc_wq, &mddev->event_work);
}
void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags) &&
			   !test_bit(BlockedBadBlocks, &rdev->flags),
			   msecs_to_jiffies(5000));
	rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
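
/*
 * Note for callers (added commentary): md_wait_for_blocked_rdev() drops one
 * nr_pending reference via rdev_dec_pending(), so the caller is expected to
 * have taken one (atomic_inc(&rdev->nr_pending)) before sleeping here.  The
 * 5 second timeout bounds the wait in case the Blocked/BlockedBadBlocks
 * flags are never cleared.
 */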
void md_finish_reshape(struct mddev *mddev)
{
	/* called by the personality module when reshape completes. */
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev) {
		if (rdev->data_offset > rdev->new_data_offset)
			rdev->sectors += rdev->data_offset - rdev->new_data_offset;
		else
			rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
		rdev->data_offset = rdev->new_data_offset;
	}
}
EXPORT_SYMBOL(md_finish_reshape);
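
/*
 * Worked example (hypothetical numbers): if a reshape moved an rdev's data
 * from data_offset 2048 down to new_data_offset 1024, the usable area grows
 * by 2048 - 1024 = 1024 sectors, so rdev->sectors += 1024 before the two
 * offsets are collapsed into one.  A reshape that moves data forward
 * shrinks rdev->sectors by the same logic.
 */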
/* Bad block management.
 * We can record which blocks on each device are 'bad' and so just
 * fail those blocks, or that stripe, rather than the whole device.
 * Entries in the bad-block table are 64bits wide.  This comprises:
 * Length of bad-range, in sectors: 0-511 for lengths 1-512
 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
 *  A 'shift' can be set so that larger blocks are tracked and
 *  consequently larger devices can be covered.
 * 'Acknowledged' flag - 1 bit. - the most significant bit.
 *
 * Locking of the bad-block table uses a seqlock so md_is_badblock
 * might need to retry if it is very unlucky.
 * We will sometimes want to check for bad blocks in a bi_end_io function,
 * so we use the write_seqlock_irq variant.
 *
 * When looking for a bad block we specify a range and want to
 * know if any block in the range is bad.  So we binary-search
 * to the last range that starts at-or-before the given endpoint
 * (i.e. before the sector just after the target range),
 * then see if it ends after the given start.
 * We return
 *  0 if there are no known bad blocks in the range
 *  1 if there are known bad blocks which are all acknowledged
 * -1 if there are bad blocks which have not yet been acknowledged in metadata.
 * plus the start/length of the first bad section we overlap.
 */
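
/*
 * Worked example of the packing (using the BB_* helpers from md.h):
 * an entry is  ack(1 bit) | start(54 bits) | len-1(9 bits),  so an
 * acknowledged 8-sector range starting at sector 1000 is
 *	BB_MAKE(1000, 8, 1) == (1ULL << 63) | (1000 << 9) | (8 - 1)
 *			    == 0x800000000007d007
 * and BB_OFFSET()/BB_LEN()/BB_ACK() recover 1000, 8 and 1 from it.
 */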
int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
		   sector_t *first_bad, int *bad_sectors)
{
	int hi;
	int lo;
	u64 *p = bb->page;
	int rv;
	sector_t target = s + sectors;
	unsigned seq;

	if (bb->shift > 0) {
		/* round the start down, and the end up */
		s >>= bb->shift;
		target += (1<<bb->shift) - 1;
		target >>= bb->shift;
		sectors = target - s;
	}
	/* 'target' is now the first block after the bad range */

retry:
	seq = read_seqbegin(&bb->lock);
	lo = 0;
	rv = 0;
	hi = bb->count;

	/* Binary search between lo and hi for 'target'
	 * i.e. for the last range that starts before 'target'
	 */
	/* INVARIANT: ranges before 'lo' and at-or-after 'hi'
	 * are known not to be the last range before target.
	 * VARIANT: hi-lo is the number of possible
	 * ranges, and decreases until it reaches 1
	 */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a < target)
			/* This could still be the one, earlier ranges
			 * could not. */
			lo = mid;
		else
			/* This and later ranges are definitely out. */
			hi = mid;
	}
	/* 'lo' might be the last that started before target, but 'hi' isn't */
	if (hi > lo) {
		/* need to check all ranges that end after 's' to see if
		 * any are unacknowledged.
		 */
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			if (BB_OFFSET(p[lo]) < target) {
				/* starts before the end, and finishes after
				 * the start, so they must overlap
				 */
				if (rv != -1 && BB_ACK(p[lo]))
					rv = 1;
				else
					rv = -1;
				*first_bad = BB_OFFSET(p[lo]);
				*bad_sectors = BB_LEN(p[lo]);
			}
			lo--;
		}
	}

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return rv;
}
EXPORT_SYMBOL_GPL(md_is_badblock);
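
/*
 * Illustrative sketch (not compiled into md.c): how a personality might
 * check whether a 'sectors'-long access at device sector 's' touches known
 * bad blocks, and how the three return values are typically interpreted:
 */
#if 0
	sector_t first_bad;
	int bad_sectors;

	switch (md_is_badblock(&rdev->badblocks, s, sectors,
			       &first_bad, &bad_sectors)) {
	case 0:		/* no known bad blocks: proceed normally */
		break;
	case 1:		/* bad but acknowledged: safe to avoid/work around */
		break;
	case -1:	/* unacknowledged: metadata not yet written, must wait */
		break;
	}
#endif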
/*
 * Add a range of bad blocks to the table.
 * This might extend the table, or might contract it
 * if two adjacent ranges can be merged.
 * We binary-search to find the 'insertion' point, then
 * decide how best to handle it.
 */
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged)
{
	u64 *p;
	int lo, hi;
	int rv = 1;
	unsigned long flags;

	if (bb->shift < 0)
		/* badblocks are disabled */
		return 0;

	if (bb->shift) {
		/* round the start down, and the end up */
		sector_t next = s + sectors;
		s >>= bb->shift;
		next += (1<<bb->shift) - 1;
		next >>= bb->shift;
		sectors = next - s;
	}

	write_seqlock_irqsave(&bb->lock, flags);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts at-or-before 's' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a <= s)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo && BB_OFFSET(p[lo]) > s)
		hi = lo;

	if (hi > lo) {
		/* we found a range that might merge with the start
		 * of our new range
		 */
		sector_t a = BB_OFFSET(p[lo]);
		sector_t e = a + BB_LEN(p[lo]);
		int ack = BB_ACK(p[lo]);
		if (e >= s) {
			/* Yes, we can merge with a previous range */
			if (s == a && s + sectors >= e)
				/* new range covers old */
				ack = acknowledged;
			else
				ack = ack && acknowledged;

			if (e < s + sectors)
				e = s + sectors;
			if (e - a <= BB_MAX_LEN) {
				p[lo] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				/* does not all fit in one range,
				 * make p[lo] maximal
				 */
				if (BB_LEN(p[lo]) != BB_MAX_LEN)
					p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
		}
	}
	if (sectors && hi < bb->count) {
		/* 'hi' points to the first range that starts after 's'.
		 * Maybe we can merge with the start of that range */
		sector_t a = BB_OFFSET(p[hi]);
		sector_t e = a + BB_LEN(p[hi]);
		int ack = BB_ACK(p[hi]);
		if (a <= s + sectors) {
			/* merging is possible */
			if (e <= s + sectors) {
				/* full overlap */
				e = s + sectors;
				ack = acknowledged;
			} else
				ack = ack && acknowledged;

			a = s;
			if (e - a <= BB_MAX_LEN) {
				p[hi] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
			lo = hi;
			hi++;
		}
	}
	if (sectors == 0 && hi < bb->count) {
		/* we might be able to combine lo and hi */
		/* Note: 's' is at the end of 'lo' */
		sector_t a = BB_OFFSET(p[hi]);
		int lolen = BB_LEN(p[lo]);
		int hilen = BB_LEN(p[hi]);
		int newlen = lolen + hilen - (s - a);
		if (s >= a && newlen < BB_MAX_LEN) {
			/* yes, we can combine them */
			int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
			p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
			memmove(p + hi, p + hi + 1,
				(bb->count - hi - 1) * 8);
			bb->count--;
		}
	}
	while (sectors) {
		/* didn't merge (it all).
		 * Need to add a range just before 'hi' */
		if (bb->count >= MD_MAX_BADBLOCKS) {
			/* No room for more */
			rv = 0;
			break;
		} else {
			int this_sectors = sectors;
			memmove(p + hi + 1, p + hi,
				(bb->count - hi) * 8);
			bb->count++;

			if (this_sectors > BB_MAX_LEN)
				this_sectors = BB_MAX_LEN;
			p[hi] = BB_MAKE(s, this_sectors, acknowledged);
			sectors -= this_sectors;
			s += this_sectors;
		}
	}

	bb->changed = 1;
	if (!acknowledged)
		bb->unacked_exist = 1;
	write_sequnlock_irqrestore(&bb->lock, flags);

	return rv;
}
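
/*
 * Merge example (hypothetical table state): with a single entry
 * (start 1000, len 8), md_set_badblocks(bb, 1008, 8, ack) finds that the
 * entry ends exactly where the new range starts, so the table contracts to
 * one entry (start 1000, len 16) instead of growing -- provided the merged
 * length stays within BB_MAX_LEN (512 sectors).
 */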
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
		       int is_new)
{
	int rv;
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	rv = md_set_badblocks(&rdev->badblocks,
			      s, sectors, 0);
	if (rv) {
		/* Make sure they get written out promptly */
		sysfs_notify_dirent_safe(rdev->sysfs_state);
		set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
		md_wakeup_thread(rdev->mddev->thread);
	}
	return rv;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);
/*
 * Remove a range of bad blocks from the table.
 * This may involve extending the table if we split a region,
 * but it must not fail.  So if the table becomes full, we just
 * drop the remove request.
 */
static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
{
	u64 *p;
	int lo, hi;
	sector_t target = s + sectors;
	int rv = 0;

	if (bb->shift > 0) {
		/* When clearing we round the start up and the end down.
		 * This should not matter as the shift should align with
		 * the block size and no rounding should ever be needed.
		 * However it is better to think a block is bad when it
		 * isn't than to think a block is not bad when it is.
		 */
		s += (1<<bb->shift) - 1;
		s >>= bb->shift;
		target >>= bb->shift;
		sectors = target - s;
	}

	write_seqlock_irq(&bb->lock);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts before 'target' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a < target)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo) {
		/* p[lo] is the last range that could overlap the
		 * current range.  Earlier ranges could also overlap,
		 * but only this one can overlap the end of the range.
		 */
		if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
			/* Partial overlap, leave the tail of this range */
			int ack = BB_ACK(p[lo]);
			sector_t a = BB_OFFSET(p[lo]);
			sector_t end = a + BB_LEN(p[lo]);

			if (a < s) {
				/* we need to split this range */
				if (bb->count >= MD_MAX_BADBLOCKS) {
					rv = 0;
					goto out;
				}
				memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
				bb->count++;
				p[lo] = BB_MAKE(a, s-a, ack);
				lo++;
			}
			p[lo] = BB_MAKE(target, end - target, ack);
			/* there is no longer an overlap */
			hi = lo;
			lo--;
		}
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			/* This range does overlap */
			if (BB_OFFSET(p[lo]) < s) {
				/* Keep the early parts of this range. */
				int ack = BB_ACK(p[lo]);
				sector_t start = BB_OFFSET(p[lo]);
				p[lo] = BB_MAKE(start, s - start, ack);
				/* now 'lo' doesn't overlap, so we're done. */
				break;
			}
			lo--;
		}
		/* 'lo' is strictly before, 'hi' is strictly after,
		 * anything between needs to be discarded
		 */
		if (hi - lo > 1) {
			memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
			bb->count -= (hi - lo - 1);
		}
	}

	bb->changed = 1;
out:
	write_sequnlock_irq(&bb->lock);
	return rv;
}
int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			 int is_new)
{
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	return md_clear_badblocks(&rdev->badblocks,
				  s, sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
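
/*
 * Split example (hypothetical table state): with one entry
 * (start 1000, len 16), clearing sectors 1004-1007 leaves the head and the
 * tail as two entries, (1000, 4) and (1008, 8).  This is why clearing can
 * *grow* the table, and why the request is simply dropped when no free
 * slot remains.
 */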
/*
 * Acknowledge all bad blocks in a list.
 * This only succeeds if ->changed is clear.  It is used by
 * in-kernel metadata updates.
 */
void md_ack_all_badblocks(struct badblocks *bb)
{
	if (bb->page == NULL || bb->changed)
		/* no point even trying */
		return;
	write_seqlock_irq(&bb->lock);

	if (bb->changed == 0 && bb->unacked_exist) {
		u64 *p = bb->page;
		int i;
		for (i = 0; i < bb->count ; i++) {
			if (!BB_ACK(p[i])) {
				sector_t start = BB_OFFSET(p[i]);
				int len = BB_LEN(p[i]);
				p[i] = BB_MAKE(start, len, 1);
			}
		}
		bb->unacked_exist = 0;
	}
	write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(md_ack_all_badblocks);
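
/*
 * Acknowledgement flow (added commentary): entries arrive unacknowledged,
 * and the in-kernel caller, md_update_sb(), invokes md_ack_all_badblocks()
 * only after the superblocks recording them have been written, at which
 * point waiters blocked on BlockedBadBlocks can be released.  The ->changed
 * test above ensures a racing new entry keeps the table unacknowledged.
 */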
/* sysfs access to bad-blocks list.
 * We present two files.
 * 'bad-blocks' lists sector numbers and lengths of ranges that
 *    are recorded as bad.  The list is truncated to fit within
 *    the one-page limit of sysfs.
 *    Writing "sector length" to this file adds an acknowledged
 *    bad-block range.
 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
 *    been acknowledged.  Writing to this file adds bad blocks
 *    without acknowledging them.  This is largely for testing.
 */
static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack)
{
	size_t len;
	int i;
	u64 *p = bb->page;
	unsigned seq;

	if (bb->shift < 0)
		return 0;

retry:
	seq = read_seqbegin(&bb->lock);

	len = 0;
	i = 0;

	while (len < PAGE_SIZE && i < bb->count) {
		sector_t s = BB_OFFSET(p[i]);
		unsigned int length = BB_LEN(p[i]);
		int ack = BB_ACK(p[i]);
		i++;

		if (unack && ack)
			continue;

		len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
				(unsigned long long)s << bb->shift,
				length << bb->shift);
	}
	if (unack && len == 0)
		bb->unacked_exist = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return len;
}
#define DO_DEBUG 1

static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
{
	unsigned long long sector;
	int length;
	char newline;
#ifdef DO_DEBUG
	/* Allow clearing via sysfs *only* for testing/debugging.
	 * Normally only a successful write may clear a badblock
	 */
	int clear = 0;
	if (page[0] == '-') {
		clear = 1;
		page++;
	}
#endif /* DO_DEBUG */

	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
	case 3:
		if (newline != '\n')
			return -EINVAL;
		/* fall through */
	case 2:
		if (length <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

#ifdef DO_DEBUG
	if (clear) {
		md_clear_badblocks(bb, sector, length);
		return len;
	}
#endif /* DO_DEBUG */
	if (md_set_badblocks(bb, sector, length, !unack))
		return len;
	else
		return -ENOSPC;
}
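
/*
 * Input examples accepted by badblocks_store(): "1000 16" and "1000 16\n"
 * both record a 16-sector range starting at sector 1000; with DO_DEBUG,
 * a leading '-' ("-1000 16") clears the range instead.  Anything else --
 * too few fields, a non-positive length, or trailing junk -- yields
 * -EINVAL.
 */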
static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	struct mddev *mddev;
	int need_delay = 0;

	for_each_mddev(mddev, tmp) {
		if (mddev_trylock(mddev)) {
			if (mddev->pers)
				__md_stop_writes(mddev);
			mddev->safemode = 2;
			mddev_unlock(mddev);
		}
		need_delay = 1;
	}
	/*
	 * certain more exotic SCSI devices are known to be
	 * volatile wrt too early system reboots. While the
	 * right place to handle this issue is the given
	 * driver, we do want to have a safe RAID driver ...
	 */
	if (need_delay)
		mdelay(1000*1);

	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}
static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
		goto err_md;

	if ((ret = register_blkdev(0, "mdp")) < 0)
		goto err_mdp;
	mdp_major = ret;

	blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}
#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}
static void autostart_arrays(int part)
{
	struct md_rdev *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev, 0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
	       i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */
static __exit void md_exit(void)
{
	struct mddev *mddev;
	struct list_head *tmp;

	blk_unregister_region(MKDEV(MD_MAJOR, 0), 1U << MINORBITS);
	blk_unregister_region(MKDEV(mdp_major, 0), 1U << MINORBITS);

	unregister_blkdev(MD_MAJOR, "md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->hold_active = 0;
	}
	destroy_workqueue(md_misc_wq);
	destroy_workqueue(md_wq);
}

subsys_initcall(md_init);
module_exit(md_exit);
static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
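
/*
 * Usage sketch (assuming the modular build, where this file becomes md_mod):
 * start_ro can be set at boot with "md_mod.start_ro=1" or at runtime via
 * /sys/module/md_mod/parameters/start_ro, making newly assembled arrays
 * come up auto-read-only until first written.
 */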
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
EXPORT_SYMBOL(md_reap_sync_thread);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);