extent-tree.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 *
 * CHUNK_ALLOC_LIMITED means to only try to allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_FORCE = 1,
	CHUNK_ALLOC_LIMITED = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * This adds the block group to the fs_info rb tree for the block group
 * cache.
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr.
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;

	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret);
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret);
		}

		kfree(logical);
	}
	return 0;
}

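/*
 * Editorial note (not in the original file): the superblock mirrors live
 * at fixed device offsets (64KiB, 64MiB and 256GiB for mirrors 0-2, with
 * BTRFS_SUPER_MIRROR_MAX == 3).  btrfs_rmap_block() maps each mirror back
 * to the logical stripes of this block group, and add_excluded_extent()
 * marks them so the caching code never hands those bytes out as free
 * space.
 */
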
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * can't be used yet, because their free space will not be released until
 * the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}

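/*
 * Worked example for add_new_free_space() (illustrative, not part of the
 * original file): adding the range [0, 100) while the pinned tree holds
 * an extent covering bytes 40-59 inclusive.  The first search finds
 * extent_start = 40 > start, so [0, 40) is added (40 bytes) and start
 * jumps to extent_end + 1 = 60.  The next search finds nothing, the loop
 * breaks, and the tail check adds [60, 100) (another 40 bytes), for a
 * total_added of 80.
 */
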
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    btrfs_next_leaf(extent_root, path)) {
				caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->extent_commit_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

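/*
 * Editorial note (not in the original file): the 2MB check above batches
 * wakeups on caching_ctl->wait, so allocators blocked on a partially
 * cached block group can start using free space after every couple of
 * megabytes is discovered, instead of waiting for the whole group to
 * finish caching.
 */
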
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	BUG_ON(!caching_ctl);

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	caching_ctl->work.func = caching_thread;

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but it can happen in the case
	 * where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info.  The previous loop should have kept us from choosing this
	 * block group, but if we've moved to the state where we will wait on
	 * caching block groups we need to first check if we're doing a fast
	 * load here, so we can wait for it to finish, otherwise we could end
	 * up allocating from a block group whose cache gets evicted for one
	 * reason or another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	/*
	 * We can't do the read from on-disk cache during a commit since we
	 * need to have the normal tree locking.  Also if we are currently
	 * trying to allocate blocks for the tree root we can't do the fast
	 * caching since we likely hold important locks.
	 */
	if (trans && (!trans->transaction->in_commit) &&
	    (root && root != root->fs_info->tree_root) &&
	    btrfs_test_opt(root, SPACE_CACHE)) {
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
			}
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching, set cached to the
		 * appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->extent_commit_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

	return ret;
}

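/*
 * Editorial summary (not in the original file): cache_block_group() has
 * three outcomes.  A successful fast load (load_free_space_cache()
 * returned 1) marks the group BTRFS_CACHE_FINISHED and returns
 * immediately; a fast load that did not succeed with load_cache_only set
 * leaves the group at BTRFS_CACHE_NO so a later caller can retry;
 * otherwise the group is marked BTRFS_CACHE_STARTED and the
 * caching_thread worker is queued to scan the extent tree in the
 * background.
 */
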
/*
 * Return the block group that starts at or after bytenr.
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * Return the block group that contains the given bytenr.
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

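/*
 * Usage sketch (illustrative, not part of the original file): both lookup
 * helpers return a referenced block group, since
 * block_group_cache_tree_search() calls btrfs_get_block_group() on a hit.
 * Every successful lookup must therefore be balanced with a put:
 *
 *	cache = btrfs_lookup_block_group(fs_info, bytenr);
 *	if (cache) {
 *		... use cache ...
 *		btrfs_put_block_group(cache);
 *	}
 */
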
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
		 BTRFS_BLOCK_GROUP_METADATA;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
	if (factor == 100)
		return num;
	num *= factor;
	do_div(num, 100);
	return num;
}

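/*
 * Worked example (illustrative, not part of the original file): these two
 * helpers scale a size by a coarse or fine percentage using do_div(),
 * since 64-bit division needs a helper on 32-bit kernels.  For instance,
 * div_factor(10240, 9) yields 9216 (90% of the size) and
 * div_factor_fine(10240, 75) yields 7680 (75%).  Callers below compare
 * used + pinned + reserved space against such fractions of a block
 * group's size.
 */
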
u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}

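/*
 * Editorial note (not in the original file): the search above makes up to
 * three passes.  The first starts at max(search_hint, search_start) and
 * skips read-only groups, accepting a metadata group that is less than
 * 90% committed (factor 9); the second wraps around to search_start; the
 * final pass repeats with full_search set and factor 10, accepting any
 * metadata group (read-only included) with any free room at all.
 */
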
/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

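/*
 * Editorial note (not in the original file): extent items in the extent
 * tree are keyed as (start, BTRFS_EXTENT_ITEM_KEY, length), so an exact
 * btrfs_search_slot() hit means an extent of exactly that length exists
 * at that bytenr.  The return convention follows btrfs_search_slot():
 * 0 if the extent item was found, 1 if not, negative on error.
 */
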
/*
 * Helper function to look up the reference count and flags of an extent.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check to see
 * what the reference count and extent flags would be once all of the
 * queued delayed refs are processed, without actually running them.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and
			 * try again.
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

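/*
 * Caller sketch (illustrative, not part of the original file):
 *
 *	u64 refs, flags;
 *
 *	ret = btrfs_lookup_extent_info(trans, root, bytenr, num_bytes,
 *				       &refs, &flags);
 *	if (!ret && refs > 1 && (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
 *		... the shared block carries a full back reference ...
 *
 * When trans is NULL the commit root is searched with locking skipped,
 * and no delayed ref modifications are folded into the result.
 */
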
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  The implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  The full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually the full back refs are generic and
 * can be used in all cases where the implicit back refs are used.  The
 * major shortcoming of the full back refs is their overhead: every time a
 * tree block gets COWed, we have to update the back refs entry for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block and increase the lower level extents' reference counts.  The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block and increase the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, and
 * the key type is used to differentiate between types of back refs.
 * The meaning of the key offset depends on the type of back ref.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used
 * and the fields are filled in as:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist of a key only.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, the lowest key and the level of the
 * tree block are also required.  This information is stored in the tree
 * block info structure.
 */
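/*
 * Worked example (values are made up purely for illustration): a data
 * extent at bytenr 12582912 of length 4096, referenced once from inode 257
 * at file offset 0 in subvolume 5, is described by the extent item
 *
 *	(12582912, BTRFS_EXTENT_ITEM_KEY, 4096)
 *
 * carrying an inline BTRFS_EXTENT_DATA_REF_KEY back ref with
 * (root=5, objectid=257, offset=0, count=1).  If the leaf holding the
 * file extent item later becomes shared between trees, the reference is
 * instead expressed as a BTRFS_SHARED_DATA_REF_KEY whose offset is the
 * bytenr of that leaf.
 */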
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif
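/*
 * Hash the (root objectid, inode objectid, file offset) triple that
 * identifies an implicit data back ref; the result becomes the key offset
 * of BTRFS_EXTENT_DATA_REF_KEY items.  Note the high half is shifted by
 * 31, not 32; the exact mixing hardly matters, but it is baked into the
 * on-disk key layout and so cannot be changed.
 */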
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}
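/*
 * Minimal sketch, not used anywhere in this file, of how the lookup and
 * insert helpers below pick their search key: shared data refs key on the
 * parent block's bytenr, implicit refs on the hash computed above.  The
 * "example_" name is hypothetical.
 */
static inline void example_data_ref_key(struct btrfs_key *key, u64 bytenr,
					u64 parent, u64 root_objectid,
					u64 owner, u64 offset)
{
	key->objectid = bytenr;
	if (parent) {
		/* full back ref: keyed by the block that holds the pointer */
		key->type = BTRFS_SHARED_DATA_REF_KEY;
		key->offset = parent;
	} else {
		/* implicit back ref: keyed by the (root, inode, offset) hash */
		key->type = BTRFS_EXTENT_DATA_REF_KEY;
		key->offset = hash_extent_data_ref(root_objectid, owner,
						   offset);
	}
}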
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}
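/*
 * Map (parent, owner) to the back ref key type.  Owners below
 * BTRFS_FIRST_FREE_OBJECTID denote tree blocks (data inode numbers start
 * at BTRFS_FIRST_FREE_OBJECTID), so the owner value doubles as the
 * tree block vs. data distinction:
 *
 *	tree block, shared (parent set)	-> BTRFS_SHARED_BLOCK_REF_KEY
 *	tree block, owned		-> BTRFS_TREE_BLOCK_REF_KEY
 *	data extent, shared		-> BTRFS_SHARED_DATA_REF_KEY
 *	data extent, owned		-> BTRFS_EXTENT_DATA_REF_KEY
 */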
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * look for an inline back ref.  if the back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * if the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block.
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
/*
 * helper to add new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}
/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}
static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}
static int btrfs_issue_discard(struct block_device *bdev,
			       u64 start, u64 len)
{
	/* blkdev_issue_discard() takes 512-byte sectors, not bytes */
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
			      bytenr, &num_bytes, &bbio, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			if (!stripe->dev->can_discard)
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length);
			if (!ret)
				discarded_bytes += stripe->length;
			else if (ret != -EOPNOTSUPP)
				break;

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		kfree(bbio);
	}

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	return ret;
}
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
						 parent, root_objectid,
						 (int)owner,
						 BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
						 parent, root_objectid,
						 owner, offset,
						 BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = node->num_bytes;

	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_tree_ref(node);
	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags ||
		       !extent_op->update_key);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret;
	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
				BUG_ON(ret);
			}
		}
		mutex_unlock(&head->mutex);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}
static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * This prevents the ref count from going down to zero while
	 * there are still pending delayed refs.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head.  If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);
		if (!ref) {
			/* All delayed refs have been processed; go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				kfree(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				BUG_ON(ret);
				kfree(extent_op);

				cond_resched();
				spin_lock(&delayed_refs->lock);
				continue;
			}

			list_del_init(&locked_ref->cluster);
			locked_ref = NULL;
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;

		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);
		BUG_ON(ret);

		btrfs_put_delayed_ref(ref);
		kfree(extent_op);
		count++;

		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	return count;
}
/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int ret;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);
again:
	spin_lock(&delayed_refs->lock);
	if (count == 0) {
		count = delayed_refs->num_entries * 2;
		run_most = 1;
	}
	while (1) {
		if (!(run_all || run_most) &&
		    delayed_refs->num_heads_ready < 64)
			break;

		/*
		 * go find something we can process in the rbtree.  We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		ret = run_clustered_refs(trans, root, &cluster);
		BUG_ON(ret < 0);

		count -= min_t(unsigned long, ret, count);

		if (count == 0)
			break;
	}

	if (run_all) {
		node = rb_first(&delayed_refs->root);
		if (!node)
			goto out;
		count = (unsigned long)-1;

		while (node) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;

				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);
		goto again;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
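/*
 * Usage sketch (illustrative only): callers that must drain the whole
 * queue, such as the transaction commit path, pass (unsigned long)-1,
 * while others pass a finite count just to keep the backlog bounded, e.g.
 *
 *	btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 */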
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;

	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
	if (ret)
		kfree(extent_op);
	return ret;
}
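/*
 * Check the delayed ref queue for references to bytenr other than the
 * (objectid, offset) data ref held by this root.  Returns 0 if the only
 * queued ref is ours, 1 if any other ref exists, -ENOENT if there is no
 * delayed head for this extent, and -EAGAIN if the head mutex was
 * contended and the caller should retry.
 */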
static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	int ret = 0;

	ret = -ENOENT;
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	if (!mutex_trylock(&head->mutex)) {
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(path);

		/*
		 * Mutex was contended, block until it's released and let
		 * caller try again
		 */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out_unlock;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	if (ref->bytenr != bytenr)
		goto out_unlock;

	ret = 1;
	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out_unlock;

	data_ref = btrfs_delayed_node_to_data_ref(ref);

	node = rb_prev(node);
	if (node) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (ref->bytenr == bytenr)
			goto out_unlock;
	}

	if (data_ref->root != root->root_key.objectid ||
	    data_ref->objectid != objectid || data_ref->offset != offset)
		goto out_unlock;

	ret = 0;
out_unlock:
	mutex_unlock(&head->mutex);
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}
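/*
 * Check the committed extent tree.  Returns 0 only if the extent is in
 * the simplest exclusive shape: a single inline data back ref that
 * matches (root, objectid, offset), accounts for all of the extent's
 * references, and was allocated after the root's last snapshot.
 * Anything else conservatively reports a possible cross reference.
 */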
static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_path *path;
	int ret;
	int ret2;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOENT;

	do {
		ret = check_committed_ref(trans, root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret2 = check_delayed_ref(trans, root, path, objectid,
					 offset, bytenr);
	} while (ret2 == -EAGAIN);

	if (ret2 && ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	if (ret != -ENOENT || ret2 != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		WARN_ON(ret > 0);
	return ret;
}
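/*
 * Walk every pointer in @buf and add (inc=1) or drop (inc=0) one
 * reference for each of them: file extent items when buf is a leaf,
 * child block pointers otherwise.  full_backref selects shared
 * (parent based) back refs instead of implicit ones.
 */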
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc)
{
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64);

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = btrfs_level_size(root, level - 1);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	BUG();
	return ret;
}
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);
fail:
	if (ret)
		return ret;
	return 0;
}
static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
		 struct btrfs_block_group_cache *cache)
{
	struct rb_node *node;

	spin_lock(&root->fs_info->block_group_cache_lock);
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&root->fs_info->block_group_cache_lock);
	return cache;
}
static int cache_save_setup(struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path)
{
	struct btrfs_root *root = block_group->fs_info->tree_root;
	struct inode *inode = NULL;
	u64 alloc_hint = 0;
	int dcs = BTRFS_DC_ERROR;
	int num_pages = 0;
	int retries = 0;
	int ret = 0;

	/*
	 * If this block group is smaller than 100 megs, don't bother caching
	 * it.
	 */
	if (block_group->key.offset < (100 * 1024 * 1024)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}

again:
	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		btrfs_release_path(path);
		goto out;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retries);
		retries++;

		if (block_group->ro)
			goto out_free;

		ret = create_free_space_inode(root, trans, block_group, path);
		if (ret)
			goto out_free;
		goto again;
	}

	/* We've already setup this transaction, go ahead and exit */
	if (block_group->cache_generation == trans->transid &&
	    i_size_read(inode)) {
		dcs = BTRFS_DC_SETUP;
		goto out_put;
	}

	/*
	 * We want to set the generation to 0, that way if anything goes wrong
	 * from here on out we know not to trust this cache when we load up
	 * next time.
	 */
	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	WARN_ON(ret);

	if (i_size_read(inode) > 0) {
		ret = btrfs_truncate_free_space_cache(root, trans, path,
						      inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED) {
		/* We're not cached, don't bother trying to write stuff out */
		dcs = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);

	num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
	if (!num_pages)
		num_pages = 1;

	/*
	 * Just to make absolutely sure we have enough space, we're going to
	 * preallocate 16 pages worth of space for each gigabyte of block
	 * group.  In practice we ought to need far fewer, but we want extra
	 * space so we can add our header and have a terminator between the
	 * extents and the bitmaps.
	 */
	num_pages *= 16;
	num_pages *= PAGE_CACHE_SIZE;

	ret = btrfs_check_data_free_space(inode, num_pages);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
					      num_pages, num_pages,
					      &alloc_hint);
	if (!ret)
		dcs = BTRFS_DC_SETUP;
	btrfs_free_reserved_data_space(inode, num_pages);

out_put:
	iput(inode);
out_free:
	btrfs_release_path(path);
out:
	spin_lock(&block_group->lock);
	if (!ret && dcs == BTRFS_DC_SETUP)
		block_group->cache_generation = trans->transid;
	block_group->disk_cache_state = dcs;
	spin_unlock(&block_group->lock);

	return ret;
}
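
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the preallocation above works out to 16 pages per gigabyte of block
 * group.  For a 1GB block group with 4K pages that is 16 * 4096 = 64K
 * reserved for the free space cache file.  The helper name is
 * hypothetical; it just restates the arithmetic from cache_save_setup().
 */
static u64 __maybe_unused example_cache_prealloc_bytes(u64 block_group_size)
{
	u64 num_pages = div64_u64(block_group_size, 1024 * 1024 * 1024);

	if (!num_pages)
		num_pages = 1;
	/* 16 pages per gigabyte, in bytes */
	return num_pages * 16 * PAGE_CACHE_SIZE;
}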
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;
	int err = 0;
	struct btrfs_path *path;
	u64 last = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}
		err = cache_save_setup(cache, trans, path);
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}

	while (1) {
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			BUG_ON(err);
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
				btrfs_put_block_group(cache);
				goto again;
			}

			if (cache->dirty)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		if (cache->disk_cache_state == BTRFS_DC_SETUP)
			cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
		cache->dirty = 0;
		last = cache->key.objectid + cache->key.offset;

		err = write_one_cache_group(trans, root, path, cache);
		BUG_ON(err);
		btrfs_put_block_group(cache);
	}

	while (1) {
		/*
		 * I don't think this is needed since we're just marking our
		 * preallocated extent as written, but just in case, it can't
		 * hurt.
		 */
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			BUG_ON(err);
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			/*
			 * Really this shouldn't happen, but it could if we
			 * couldn't write the entire preallocated extent and
			 * splitting the extent resulted in a new block.
			 */
			if (cache->dirty) {
				btrfs_put_block_group(cache);
				goto again;
			}
			if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		btrfs_write_out_cache(root, trans, cache, path);

		/*
		 * If we didn't have an error then the cache state is still
		 * NEED_WRITE, so we can set it to WRITTEN.
		 */
		if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
			cache->disk_cache_state = BTRFS_DC_WRITTEN;
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}

	btrfs_free_path(path);
	return 0;
}
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int i;
	int factor;

	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		     BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	found = __find_space_info(info, flags);
	if (found) {
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->disk_total += total_bytes * factor;
		found->bytes_used += bytes_used;
		found->disk_used += bytes_used * factor;
		found->full = 0;
		spin_unlock(&found->lock);
		*space_info = found;
		return 0;
	}
	found = kzalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&found->block_groups[i]);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
				BTRFS_BLOCK_GROUP_SYSTEM |
				BTRFS_BLOCK_GROUP_METADATA);
	found->total_bytes = total_bytes;
	found->disk_total = total_bytes * factor;
	found->bytes_used = bytes_used;
	found->disk_used = bytes_used * factor;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = 0;
	found->bytes_may_use = 0;
	found->full = 0;
	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
	found->chunk_alloc = 0;
	found->flush = 0;
	init_waitqueue_head(&found->wait);
	*space_info = found;
	list_add_rcu(&found->list, &info->space_info);
	return 0;
}
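
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the "factor" above accounts for raw disk usage.  DUP, RAID1 and RAID10
 * store two copies of everything, so one logical byte consumes two bytes
 * of disk; single and RAID0 consume one.  The helper name is hypothetical.
 */
static u64 __maybe_unused example_disk_bytes(u64 logical_bytes, u64 flags)
{
	int factor = (flags & (BTRFS_BLOCK_GROUP_DUP |
			       BTRFS_BLOCK_GROUP_RAID1 |
			       BTRFS_BLOCK_GROUP_RAID10)) ? 2 : 1;

	return logical_bytes * factor;
}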
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_DUP);
	if (extra_flags) {
		if (flags & BTRFS_BLOCK_GROUP_DATA)
			fs_info->avail_data_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_METADATA)
			fs_info->avail_metadata_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			fs_info->avail_system_alloc_bits |= extra_flags;
	}
}
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	/*
	 * we add in the count of missing devices because we want
	 * to make sure that any RAID levels on a degraded FS
	 * continue to be honored.
	 */
	u64 num_devices = root->fs_info->fs_devices->rw_devices +
		root->fs_info->fs_devices->missing_devices;

	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
		      BTRFS_BLOCK_GROUP_RAID10))) {
		flags &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
	     (flags & BTRFS_BLOCK_GROUP_DUP)))
		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
	return flags;
}
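
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * worked example of the reductions above.  On a single-device filesystem
 * (num_devices == 1) the RAID0 and RAID1 bits are cleared, so asking for
 * RAID1 data degrades to a plain single-copy profile.  The helper name
 * is hypothetical.
 */
static u64 __maybe_unused example_reduce_single_device(struct btrfs_root *root)
{
	u64 flags = BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1;

	/* with one rw device and no missing devices, RAID1 is stripped */
	return btrfs_reduce_alloc_profile(root, flags);
}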
static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		flags |= root->fs_info->avail_data_alloc_bits;
	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		flags |= root->fs_info->avail_system_alloc_bits;
	else if (flags & BTRFS_BLOCK_GROUP_METADATA)
		flags |= root->fs_info->avail_metadata_alloc_bits;

	return btrfs_reduce_alloc_profile(root, flags);
}

u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
	u64 flags;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == root->fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	return get_alloc_profile(root, flags);
}
void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
{
	BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
						       BTRFS_BLOCK_GROUP_DATA);
}
/*
 * This will check the space that the inode allocates from to make sure we have
 * enough space for bytes.
 */
int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
{
	struct btrfs_space_info *data_sinfo;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 used;
	int ret = 0, committed = 0, alloc_chunk = 1;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	if (root == root->fs_info->tree_root ||
	    BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
		alloc_chunk = 0;
		committed = 1;
	}

	data_sinfo = BTRFS_I(inode)->space_info;
	if (!data_sinfo)
		goto alloc;

again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
		data_sinfo->bytes_may_use;

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we
		 * need to alloc a new chunk.
		 */
		if (!data_sinfo->full && alloc_chunk) {
			u64 alloc_target;

			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
			spin_unlock(&data_sinfo->lock);
alloc:
			alloc_target = btrfs_get_alloc_profile(root, 1);
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     bytes + 2 * 1024 * 1024,
					     alloc_target,
					     CHUNK_ALLOC_NO_FORCE);
			btrfs_end_transaction(trans, root);
			if (ret < 0) {
				if (ret != -ENOSPC)
					return ret;
				else
					goto commit_trans;
			}

			if (!data_sinfo) {
				btrfs_set_inode_space_info(root, inode);
				data_sinfo = BTRFS_I(inode)->space_info;
			}
			goto again;
		}

		/*
		 * If we have less pinned bytes than we want to allocate then
		 * don't bother committing the transaction, it won't help us.
		 */
		if (data_sinfo->bytes_pinned < bytes)
			committed = 1;
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
commit_trans:
		if (!committed &&
		    !atomic_read(&root->fs_info->open_ioctl_trans)) {
			committed = 1;
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				return ret;
			goto again;
		}

		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	spin_unlock(&data_sinfo->lock);

	return 0;
}
/*
 * Called if we need to clear a data reservation for this inode.
 */
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_space_info *data_sinfo;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	data_sinfo = BTRFS_I(inode)->space_info;
	spin_lock(&data_sinfo->lock);
	data_sinfo->bytes_may_use -= bytes;
	spin_unlock(&data_sinfo->lock);
}
static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
	rcu_read_unlock();
}
static int should_alloc_chunk(struct btrfs_root *root,
			      struct btrfs_space_info *sinfo, u64 alloc_bytes,
			      int force)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * We need to take into account the global rsv because for all intents
	 * and purposes it's used space.  Don't worry about locking the
	 * global_rsv, it doesn't change except when the transaction commits.
	 */
	num_allocated += global_rsv->size;

	/*
	 * in limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
		thresh = max_t(u64, 64 * 1024 * 1024,
			       div_factor_fine(thresh, 1));

		if (num_bytes - num_allocated < thresh)
			return 1;
	}

	/*
	 * we have two similar checks here, one based on percentage and one
	 * based on a hard number of 256MB.  The idea is that if we have a
	 * good amount of free room, don't allocate a chunk.  A good amount
	 * means less than 80% of the allocated chunks are utilized, or more
	 * than 256MB is free.
	 */
	if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
		return 0;

	if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
		return 0;

	thresh = btrfs_super_total_bytes(root->fs_info->super_copy);

	/* 256MB or 5% of the FS */
	thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));

	if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
		return 0;
	return 1;
}
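
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * worked example of the two overlapping checks above.  With 10GB of
 * chunks allocated and 3GB used, a 1MB allocation leaves well over 256MB
 * of hard free room and stays under 80% utilization, so both checks
 * return 0 and no new chunk is allocated.  The helper name is
 * hypothetical; the checks mirror should_alloc_chunk().
 */
static int __maybe_unused example_should_alloc(u64 num_bytes,
					       u64 num_allocated,
					       u64 alloc_bytes)
{
	/* more than 256MB of hard free room left? */
	if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
		return 0;
	/* under 80% of the allocated chunks used? */
	if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
		return 0;
	return 1;
}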
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int wait_for_alloc = 0;
	int ret = 0;

	flags = btrfs_reduce_alloc_profile(extent_root, flags);

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret);
	}
	BUG_ON(!space_info);

again:
	spin_lock(&space_info->lock);
	if (space_info->force_alloc)
		force = space_info->force_alloc;
	if (space_info->full) {
		spin_unlock(&space_info->lock);
		return 0;
	}

	if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
		spin_unlock(&space_info->lock);
		return 0;
	} else if (space_info->chunk_alloc) {
		wait_for_alloc = 1;
	} else {
		space_info->chunk_alloc = 1;
	}

	spin_unlock(&space_info->lock);

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * The chunk_mutex is held throughout the entirety of a chunk
	 * allocation, so once we've acquired the chunk_mutex we know that the
	 * other guy is done and we need to recheck and see if we should
	 * allocate.
	 */
	if (wait_for_alloc) {
		mutex_unlock(&fs_info->chunk_mutex);
		wait_for_alloc = 0;
		goto again;
	}

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
	if (ret < 0 && ret != -ENOSPC)
		goto out;

	spin_lock(&space_info->lock);
	if (ret)
		space_info->full = 1;
	else
		ret = 1;

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
out:
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
	return ret;
}
/*
 * shrink metadata reservation for delalloc
 */
static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
			   bool wait_ordered)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 reserved;
	u64 max_reclaim;
	u64 reclaimed = 0;
	long time_left;
	unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
	int loops = 0;
	unsigned long progress;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	block_rsv = &root->fs_info->delalloc_block_rsv;
	space_info = block_rsv->space_info;

	smp_mb();
	reserved = space_info->bytes_may_use;
	progress = space_info->reservation_progress;

	if (reserved == 0)
		return 0;

	smp_mb();
	if (root->fs_info->delalloc_bytes == 0) {
		if (trans)
			return 0;
		btrfs_wait_ordered_extents(root, 0, 0);
		return 0;
	}

	max_reclaim = min(reserved, to_reclaim);
	nr_pages = max_t(unsigned long, nr_pages,
			 max_reclaim >> PAGE_CACHE_SHIFT);
	while (loops < 1024) {
		/* have the flusher threads jump in and do some IO */
		smp_mb();
		nr_pages = min_t(unsigned long, nr_pages,
		       root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
		writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);

		spin_lock(&space_info->lock);
		if (reserved > space_info->bytes_may_use)
			reclaimed += reserved - space_info->bytes_may_use;
		reserved = space_info->bytes_may_use;
		spin_unlock(&space_info->lock);

		loops++;

		if (reserved == 0 || reclaimed >= max_reclaim)
			break;

		if (trans && trans->transaction->blocked)
			return -EAGAIN;

		if (wait_ordered && !trans) {
			btrfs_wait_ordered_extents(root, 0, 0);
		} else {
			time_left = schedule_timeout_interruptible(1);

			/* We were interrupted, exit */
			if (time_left)
				break;
		}

		/* we've kicked the IO a few times, if anything has been freed,
		 * exit.  There is no sense in looping here for a long time
		 * when we really need to commit the transaction, or there are
		 * just too many writers without enough free space
		 */
		if (loops > 3) {
			smp_mb();
			if (progress != space_info->reservation_progress)
				break;
		}
	}

	return reclaimed >= to_reclaim;
}
/**
 * may_commit_transaction - possibly commit the transaction if it's ok to
 * @root - the root we're allocating for
 * @space_info - the space_info we're trying to reserve from
 * @bytes - the number of bytes we want to reserve
 * @force - force the commit
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does.  Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_root *root,
				  struct btrfs_space_info *space_info,
				  u64 bytes, int force)
{
	struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
	struct btrfs_trans_handle *trans;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	if (force)
		goto commit;

	/* See if there is enough pinned space to make this reservation */
	spin_lock(&space_info->lock);
	if (space_info->bytes_pinned >= bytes) {
		spin_unlock(&space_info->lock);
		goto commit;
	}
	spin_unlock(&space_info->lock);

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		return -ENOSPC;

	spin_lock(&delayed_rsv->lock);
	if (delayed_rsv->size < bytes) {
		spin_unlock(&delayed_rsv->lock);
		return -ENOSPC;
	}
	spin_unlock(&delayed_rsv->lock);

commit:
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return -ENOSPC;

	return btrfs_commit_transaction(trans, root);
}
/**
 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int reserve_metadata_bytes(struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 orig_bytes, int flush)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 used;
	u64 num_bytes = orig_bytes;
	int retries = 0;
	int ret = 0;
	bool committed = false;
	bool flushing = false;
	bool wait_ordered = false;

again:
	ret = 0;
	spin_lock(&space_info->lock);
	/*
	 * We only want to wait if somebody other than us is flushing and we
	 * are actually allowed to flush.
	 */
	while (flush && !flushing && space_info->flush) {
		spin_unlock(&space_info->lock);
		/*
		 * If we have a trans handle we can't wait because the flusher
		 * may have to commit the transaction, which would mean we
		 * would deadlock since we are waiting for the flusher to
		 * finish, but hold the current transaction open.
		 */
		if (current->journal_info)
			return -EAGAIN;
		ret = wait_event_interruptible(space_info->wait,
					       !space_info->flush);
		/* Must have been interrupted, return */
		if (ret)
			return -EINTR;

		spin_lock(&space_info->lock);
	}

	ret = -ENOSPC;
	used = space_info->bytes_used + space_info->bytes_reserved +
		space_info->bytes_pinned + space_info->bytes_readonly +
		space_info->bytes_may_use;

	/*
	 * The idea here is that if we haven't already over-reserved the
	 * space then we can go ahead and save our reservation first and then
	 * start flushing if we need to.  Otherwise, if we've already
	 * overcommitted, let's start flushing stuff first and then come back
	 * and try to make our reservation.
	 */
	if (used <= space_info->total_bytes) {
		if (used + orig_bytes <= space_info->total_bytes) {
			space_info->bytes_may_use += orig_bytes;
			ret = 0;
		} else {
			/*
			 * Ok set num_bytes to orig_bytes since we aren't
			 * overcommitted, this way we only try and reclaim
			 * what we need.
			 */
			num_bytes = orig_bytes;
		}
	} else {
		/*
		 * Ok we're over committed, set num_bytes to the overcommitted
		 * amount plus the amount of bytes that we need for this
		 * reservation.
		 */
		wait_ordered = true;
		num_bytes = used - space_info->total_bytes +
			(orig_bytes * (retries + 1));
	}

	if (ret) {
		u64 profile = btrfs_get_alloc_profile(root, 0);
		u64 avail;

		/*
		 * If we have a lot of space that's pinned, don't bother doing
		 * the overcommit dance yet and just commit the transaction.
		 */
		avail = (space_info->total_bytes - space_info->bytes_used) * 8;
		do_div(avail, 10);
		if (space_info->bytes_pinned >= avail && flush && !committed) {
			space_info->flush = 1;
			flushing = true;
			spin_unlock(&space_info->lock);
			ret = may_commit_transaction(root, space_info,
						     orig_bytes, 1);
			if (ret)
				goto out;
			committed = true;
			goto again;
		}

		spin_lock(&root->fs_info->free_chunk_lock);
		avail = root->fs_info->free_chunk_space;

		/*
		 * If we have dup, raid1 or raid10 then only half of the free
		 * space is actually usable.
		 */
		if (profile & (BTRFS_BLOCK_GROUP_DUP |
			       BTRFS_BLOCK_GROUP_RAID1 |
			       BTRFS_BLOCK_GROUP_RAID10))
			avail >>= 1;

		/*
		 * If we aren't flushing don't let us overcommit too much, say
		 * 1/8th of the space.  If we can flush, let it overcommit up
		 * to 1/2 of the space.
		 */
		if (flush)
			avail >>= 3;
		else
			avail >>= 1;
		spin_unlock(&root->fs_info->free_chunk_lock);

		if (used + num_bytes < space_info->total_bytes + avail) {
			space_info->bytes_may_use += orig_bytes;
			ret = 0;
		} else {
			wait_ordered = true;
		}
	}

	/*
	 * Couldn't make our reservation, save our place so while we're trying
	 * to reclaim space we can actually use it instead of somebody else
	 * stealing it from us.
	 */
	if (ret && flush) {
		flushing = true;
		space_info->flush = 1;
	}

	spin_unlock(&space_info->lock);

	if (!ret || !flush)
		goto out;

	/*
	 * We do synchronous shrinking since we don't actually unreserve
	 * metadata until after the IO is completed.
	 */
	ret = shrink_delalloc(root, num_bytes, wait_ordered);
	if (ret < 0)
		goto out;

	ret = 0;

	/*
	 * So if we were overcommitted it's possible that somebody else flushed
	 * out enough space and we simply didn't have enough space to reclaim,
	 * so go back around and try again.
	 */
	if (retries < 2) {
		wait_ordered = true;
		retries++;
		goto again;
	}
	ret = -ENOSPC;
	if (committed)
		goto out;

	ret = may_commit_transaction(root, space_info, orig_bytes, 0);
	if (!ret) {
		committed = true;
		goto again;
	}

out:
	if (flushing) {
		spin_lock(&space_info->lock);
		space_info->flush = 0;
		wake_up_all(&space_info->wait);
		spin_unlock(&space_info->lock);
	}
	return ret;
}
static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv = NULL;

	if (root->ref_cows || root == root->fs_info->csum_root)
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &root->fs_info->empty_block_rsv;

	return block_rsv;
}
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes)
{
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = 0;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, int update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = 1;
	spin_unlock(&block_rsv->lock);
}
static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
				    struct btrfs_block_rsv *dest, u64 num_bytes)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1)
		num_bytes = block_rsv->size;
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	} else {
		num_bytes = 0;
	}
	spin_unlock(&block_rsv->lock);

	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = 1;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes) {
			spin_lock(&space_info->lock);
			space_info->bytes_may_use -= num_bytes;
			space_info->reservation_progress++;
			spin_unlock(&space_info->lock);
		}
	}
}
static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
				   struct btrfs_block_rsv *dst, u64 num_bytes)
{
	int ret;

	ret = block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	block_rsv_add_bytes(dst, num_bytes, 1);
	return 0;
}

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_block_rsv(block_rsv);
	block_rsv->space_info = __find_space_info(fs_info,
						  BTRFS_BLOCK_GROUP_METADATA);
	return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_root *root,
			  struct btrfs_block_rsv *rsv)
{
	btrfs_block_rsv_release(root, rsv, (u64)-1);
	kfree(rsv);
}
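
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the usual life cycle of a private block reservation.  Allocate it, fill
 * it from the metadata space_info with btrfs_block_rsv_add(), consume it,
 * and free it; freeing releases whatever is left, possibly refilling the
 * global reserve.  The helper name is hypothetical.
 */
static int __maybe_unused example_block_rsv_cycle(struct btrfs_root *root)
{
	struct btrfs_block_rsv *rsv;
	int ret;

	rsv = btrfs_alloc_block_rsv(root);
	if (!rsv)
		return -ENOMEM;

	/* reserve one item's worth of metadata space */
	ret = btrfs_block_rsv_add(root, rsv,
				  btrfs_calc_trans_metadata_size(root, 1));
	if (ret) {
		btrfs_free_block_rsv(root, rsv);
		return ret;
	}

	/* ... consume the reservation ... */

	btrfs_free_block_rsv(root, rsv);	/* releases the remainder */
	return 0;
}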
static inline int __block_rsv_add(struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 num_bytes, int flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 1);
		return 0;
	}

	return ret;
}

int btrfs_block_rsv_add(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv,
			u64 num_bytes)
{
	return __block_rsv_add(root, block_rsv, num_bytes, 1);
}

int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
				struct btrfs_block_rsv *block_rsv,
				u64 num_bytes)
{
	return __block_rsv_add(root, block_rsv, num_bytes, 0);
}

int btrfs_block_rsv_check(struct btrfs_root *root,
			  struct btrfs_block_rsv *block_rsv, int min_factor)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = div_factor(block_rsv->size, min_factor);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}

static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
					   struct btrfs_block_rsv *block_rsv,
					   u64 min_reserved, int flush)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = min_reserved;
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 0);
		return 0;
	}

	return ret;
}

int btrfs_block_rsv_refill(struct btrfs_root *root,
			   struct btrfs_block_rsv *block_rsv,
			   u64 min_reserved)
{
	return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
}

int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
				   struct btrfs_block_rsv *block_rsv,
				   u64 min_reserved)
{
	return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
}

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv,
			    u64 num_bytes)
{
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_block_rsv_release(struct btrfs_root *root,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;

	if (global_rsv->full || global_rsv == block_rsv ||
	    block_rsv->space_info != global_rsv->space_info)
		global_rsv = NULL;
	block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
}
/*
 * helper to calculate size of global block reservation.
 * the desired value is sum of space used by extent tree,
 * checksum tree and root tree
 */
static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *sinfo;
	u64 num_bytes;
	u64 meta_used;
	u64 data_used;
	int csum_size = btrfs_super_csum_size(fs_info->super_copy);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
	spin_lock(&sinfo->lock);
	data_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	spin_lock(&sinfo->lock);
	if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
		data_used = 0;
	meta_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
		    csum_size * 2;
	num_bytes += div64_u64(data_used + meta_used, 50);

	if (num_bytes * 3 > meta_used)
		num_bytes = div64_u64(meta_used, 3);

	return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
}
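
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * worked example of the sizing above.  With 100GB of data, 1GB of
 * metadata, 4K blocks and 4-byte csums, the csum term is
 * (100GB / 4K) * 4 * 2 = 200MB and the 2% term is roughly 2GB, but the
 * result is clamped to meta_used / 3, i.e. about 341MB.  The helper name
 * is hypothetical; it restates the formula without the space_info lookups.
 */
static u64 __maybe_unused example_global_rsv_size(u64 data_used, u64 meta_used,
						  int blocksize_bits,
						  int csum_size)
{
	u64 num_bytes = (data_used >> blocksize_bits) * csum_size * 2;

	num_bytes += div64_u64(data_used + meta_used, 50);
	if (num_bytes * 3 > meta_used)
		num_bytes = div64_u64(meta_used, 3);
	return num_bytes;
}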
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	u64 num_bytes;

	num_bytes = calc_global_metadata_size(fs_info);

	spin_lock(&block_rsv->lock);
	spin_lock(&sinfo->lock);

	block_rsv->size = num_bytes;

	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
		    sinfo->bytes_reserved + sinfo->bytes_readonly +
		    sinfo->bytes_may_use;

	if (sinfo->total_bytes > num_bytes) {
		num_bytes = sinfo->total_bytes - num_bytes;
		block_rsv->reserved += num_bytes;
		sinfo->bytes_may_use += num_bytes;
	}

	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		sinfo->bytes_may_use -= num_bytes;
		sinfo->reservation_progress++;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	}

	spin_unlock(&sinfo->lock);
	spin_unlock(&block_rsv->lock);
}
static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->delalloc_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	update_global_block_rsv(fs_info);
}

static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
}
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (!trans->bytes_reserved)
		return;

	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
	trans->bytes_reserved = 0;
}
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * We need to hold space in order to delete our orphan item once we've
	 * added it, so this takes the reservation so we can release it later
	 * when we are truly done with the orphan item.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_orphan_release_metadata(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}

int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;

	/*
	 * two for root back/forward refs, two for directory entries
	 * and one for root of the snapshot.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
	dst_rsv->space_info = src_rsv->space_info;
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}
/**
 * drop_outstanding_extent - drop an outstanding extent
 * @inode: the inode we're dropping the extent for
 *
 * This is called when we are freeing up an outstanding extent, either called
 * after an error or after an extent is written.  This will return the number
 * of reserved extents that need to be freed.  This must be called with
 * BTRFS_I(inode)->lock held.
 */
static unsigned drop_outstanding_extent(struct inode *inode)
{
	unsigned drop_inode_space = 0;
	unsigned dropped_extents = 0;

	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
	BTRFS_I(inode)->outstanding_extents--;

	if (BTRFS_I(inode)->outstanding_extents == 0 &&
	    BTRFS_I(inode)->delalloc_meta_reserved) {
		drop_inode_space = 1;
		BTRFS_I(inode)->delalloc_meta_reserved = 0;
	}

	/*
	 * If we have more or the same amount of outstanding extents than we
	 * have reserved then we need to leave the reserved extents count
	 * alone.
	 */
	if (BTRFS_I(inode)->outstanding_extents >=
	    BTRFS_I(inode)->reserved_extents)
		return drop_inode_space;

	dropped_extents = BTRFS_I(inode)->reserved_extents -
		BTRFS_I(inode)->outstanding_extents;
	BTRFS_I(inode)->reserved_extents -= dropped_extents;
	return dropped_extents + drop_inode_space;
}
/**
 * calc_csum_metadata_size - return the amount of metadata space that must be
 *	reserved/freed for the given bytes.
 * @inode: the inode we're manipulating
 * @num_bytes: the number of bytes in question
 * @reserve: 1 if we are reserving space, 0 if we are freeing space
 *
 * This adjusts the number of csum_bytes in the inode and then returns the
 * correct amount of metadata that must either be reserved or freed.  We
 * calculate how many checksums we can fit into one leaf and then divide the
 * number of bytes that will need to be checksummed by this value to figure
 * out how many checksums will be required.  If we are adding bytes then the
 * number may go up and we will return the number of additional bytes that
 * must be reserved.  If it is going down we will return the number of bytes
 * that must be freed.
 *
 * This must be called with BTRFS_I(inode)->lock held.
 */
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
				   int reserve)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 csum_size;
	int num_csums_per_leaf;
	int num_csums;
	int old_csums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
	    BTRFS_I(inode)->csum_bytes == 0)
		return 0;

	old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	if (reserve)
		BTRFS_I(inode)->csum_bytes += num_bytes;
	else
		BTRFS_I(inode)->csum_bytes -= num_bytes;
	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
	num_csums_per_leaf = (int)div64_u64(csum_size,
					    sizeof(struct btrfs_csum_item) +
					    sizeof(struct btrfs_disk_key));
	num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	num_csums = num_csums + num_csums_per_leaf - 1;
	num_csums = num_csums / num_csums_per_leaf;

	old_csums = old_csums + num_csums_per_leaf - 1;
	old_csums = old_csums / num_csums_per_leaf;

	/* No change, no need to reserve more */
	if (old_csums == num_csums)
		return 0;

	if (reserve)
		return btrfs_calc_trans_metadata_size(root,
						      num_csums - old_csums);

	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
}
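
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the reservation above only changes when the number of csum *leaves*
 * changes, thanks to the round-up-to-a-leaf arithmetic.  For example, if
 * 100 csums fit per leaf, growing from 90 to 110 csums crosses a leaf
 * boundary (1 -> 2 leaves) and one more item's worth of space is
 * reserved; growing from 10 to 50 reserves nothing extra.  The helper
 * name is hypothetical.
 */
static int __maybe_unused example_csum_leaves_delta(int old_csums,
						    int num_csums,
						    int num_csums_per_leaf)
{
	int old_leaves = (old_csums + num_csums_per_leaf - 1) /
			 num_csums_per_leaf;
	int new_leaves = (num_csums + num_csums_per_leaf - 1) /
			 num_csums_per_leaf;

	/* positive: reserve more leaves' worth; negative: free some */
	return new_leaves - old_leaves;
}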
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
	u64 to_reserve = 0;
	u64 csum_bytes;
	unsigned nr_extents = 0;
	int extra_reserve = 0;
	int flush = 1;
	int ret;

	/* Need to be holding the i_mutex here if we aren't free space cache */
	if (btrfs_is_free_space_inode(root, inode))
		flush = 0;
	else
		WARN_ON(!mutex_is_locked(&inode->i_mutex));

	if (flush && btrfs_transaction_in_commit(root->fs_info))
		schedule_timeout(1);

	num_bytes = ALIGN(num_bytes, root->sectorsize);

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;

	if (BTRFS_I(inode)->outstanding_extents >
	    BTRFS_I(inode)->reserved_extents)
		nr_extents = BTRFS_I(inode)->outstanding_extents -
			BTRFS_I(inode)->reserved_extents;

	/*
	 * Add an item to reserve for updating the inode when we complete the
	 * delalloc io.
	 */
	if (!BTRFS_I(inode)->delalloc_meta_reserved) {
		nr_extents++;
		extra_reserve = 1;
	}

	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
	csum_bytes = BTRFS_I(inode)->csum_bytes;
	spin_unlock(&BTRFS_I(inode)->lock);

	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
	if (ret) {
		u64 to_free = 0;
		unsigned dropped;

		spin_lock(&BTRFS_I(inode)->lock);
		dropped = drop_outstanding_extent(inode);
		/*
		 * If the inode's csum_bytes is the same as the original
		 * csum_bytes then we know we haven't raced with any free()ers
		 * so we can just reduce our inode's csum bytes and carry on.
		 * Otherwise we have to do the normal free thing to account for
		 * the case that the free side didn't free up its reserve
		 * because of this outstanding reservation.
		 */
		if (BTRFS_I(inode)->csum_bytes == csum_bytes)
			calc_csum_metadata_size(inode, num_bytes, 0);
		else
			to_free = calc_csum_metadata_size(inode, num_bytes, 0);
		spin_unlock(&BTRFS_I(inode)->lock);
		if (dropped)
			to_free += btrfs_calc_trans_metadata_size(root, dropped);

		if (to_free)
			btrfs_block_rsv_release(root, block_rsv, to_free);
		return ret;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	if (extra_reserve) {
		BTRFS_I(inode)->delalloc_meta_reserved = 1;
		nr_extents--;
	}
	BTRFS_I(inode)->reserved_extents += nr_extents;
	spin_unlock(&BTRFS_I(inode)->lock);

	block_rsv_add_bytes(block_rsv, to_reserve, 1);

	return 0;
}
/**
 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
 * @inode: the inode to release the reservation for
 * @num_bytes: the number of bytes we're releasing
 *
 * This will release the metadata reservation for an inode.  This can be called
 * once we complete IO for a given set of bytes to release their metadata
 * reservations.
 */
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 to_free = 0;
	unsigned dropped;

	num_bytes = ALIGN(num_bytes, root->sectorsize);
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode);

	to_free = calc_csum_metadata_size(inode, num_bytes, 0);
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped > 0)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
				to_free);
}
/**
 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
 * @inode: inode we're writing to
 * @num_bytes: the number of bytes we want to allocate
 *
 * This will do the following things
 *
 * o reserve space in the data space info for num_bytes
 * o reserve space in the metadata space info based on the number of
 *   outstanding extents and how many csums will be needed
 * o add to the inode's ->delalloc_bytes
 * o add it to the fs_info's delalloc inodes list.
 *
 * This will return 0 for success and -ENOSPC if there is no space left.
 */
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, num_bytes);
	if (ret)
		return ret;

	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
	if (ret) {
		btrfs_free_reserved_data_space(inode, num_bytes);
		return ret;
	}

	return 0;
}

/**
 * btrfs_delalloc_release_space - release data and metadata space for delalloc
 * @inode: inode we're releasing space for
 * @num_bytes: the number of bytes we want to free up
 *
 * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
 * called in the case that we don't need the metadata AND data reservations
 * anymore, e.g. when there is an error or we insert an inline extent.
 *
 * This function will release the metadata space that was not used and will
 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
 * list if there are no delalloc bytes left.
 */
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
{
	btrfs_delalloc_release_metadata(inode, num_bytes);
	btrfs_free_reserved_data_space(inode, num_bytes);
}
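
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the typical buffered-write pattern around the two helpers above:
 * reserve data + metadata space for the range up front, and give it all
 * back if the copy fails before any delalloc is actually created.  The
 * helper name and the copy_failed flag are hypothetical.
 */
static int __maybe_unused example_write_begin(struct inode *inode, u64 bytes,
					      bool copy_failed)
{
	int ret;

	ret = btrfs_delalloc_reserve_space(inode, bytes);
	if (ret)
		return ret;	/* -ENOSPC: nothing was reserved */

	/* ... copy in user data, mark the range delalloc ... */

	if (copy_failed) {
		/* undo both the data and the metadata reservation */
		btrfs_delalloc_release_space(inode, bytes);
		return -EFAULT;
	}
	return 0;
}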
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;

	/* block accounting for super block */
	spin_lock(&info->delalloc_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -1;
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space.  This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
			cache_block_group(cache, trans, NULL, 1);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_test_opt(root, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		cache->dirty = 1;
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}
		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}

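/*
 * Illustration only: a user-space sketch of the replication factor applied
 * to disk_used above.  DUP, RAID1 and RAID10 store two copies, so one
 * logical byte costs two bytes on disk; the flag bit values below are
 * stand-ins made up for the example (the real ones live in ctree.h).
 * Compiled out with #if 0.
 */
#if 0
#include <stdio.h>

typedef unsigned long long u64;

#define BG_DUP		(1ULL << 0)	/* stand-in for BTRFS_BLOCK_GROUP_DUP */
#define BG_RAID1	(1ULL << 1)
#define BG_RAID10	(1ULL << 2)

static int replication_factor(u64 flags)
{
	return (flags & (BG_DUP | BG_RAID1 | BG_RAID10)) ? 2 : 1;
}

int main(void)
{
	u64 num_bytes = 1ULL << 20;	/* 1 MiB logical allocation */

	/* raid1 consumes 2 MiB of raw disk, raid0/single only 1 MiB */
	printf("raid1 disk delta: %llu\n",
	       num_bytes * replication_factor(BG_RAID1));
	printf("single disk delta: %llu\n",
	       num_bytes * replication_factor(0));
	return 0;
}
#endif
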
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	struct btrfs_block_group_cache *cache;
	u64 bytenr;

	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->key.objectid;
	btrfs_put_block_group(cache);

	return bytenr;
}

static int pin_down_extent(struct btrfs_root *root,
			   struct btrfs_block_group_cache *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	BUG_ON(!cache);

	pin_down_extent(root, cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 bytenr, u64 num_bytes)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	BUG_ON(!cache);

	/*
	 * pull in the free space cache (if any) so that our pin
	 * removes the free space from the cache.  We have load_only set
	 * to one because the slow code to read in the free extents does check
	 * the pinned extents.
	 */
	cache_block_group(cache, trans, root, 1);

	pin_down_extent(root, cache, bytenr, num_bytes, 0);

	/* remove us from the free space cache (if we're there at all) */
	btrfs_remove_free_space(cache, bytenr, num_bytes);
	btrfs_put_block_group(cache);
	return 0;
}

/**
 * btrfs_update_reserved_bytes - update the block_group and space info counters
 * @cache: The cache we are manipulating
 * @num_bytes: The number of bytes in question
 * @reserve: One of the reservation enums
 *
 * This is called by the allocator when it reserves space, or by somebody who is
 * freeing space that was never actually used on disk.  For example if you
 * reserve some space for a new leaf in transaction A and before transaction A
 * commits you free that leaf, you call this with reserve set to 0 in order to
 * clear the reservation.
 *
 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
 * ENOSPC accounting.  For data we handle the reservation through clearing the
 * delalloc bits in the io_tree.  We have to do this since we could end up
 * allocating less disk space for the amount of data we have reserved in the
 * case of compression.
 *
 * If this is a reservation and the block group has become read only we cannot
 * make the reservation and return -EAGAIN, otherwise this function always
 * succeeds.
 */
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (reserve != RESERVE_FREE) {
		if (cache->ro) {
			ret = -EAGAIN;
		} else {
			cache->reserved += num_bytes;
			space_info->bytes_reserved += num_bytes;
			if (reserve == RESERVE_ALLOC) {
				BUG_ON(space_info->bytes_may_use < num_bytes);
				space_info->bytes_may_use -= num_bytes;
			}
		}
	} else {
		if (cache->ro)
			space_info->bytes_readonly += num_bytes;
		cache->reserved -= num_bytes;
		space_info->bytes_reserved -= num_bytes;
		space_info->reservation_progress++;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}

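/*
 * Illustration only: a user-space model of the counter moves performed by
 * btrfs_update_reserved_bytes() above.  RESERVE_ALLOC moves bytes from the
 * speculative bytes_may_use bucket into bytes_reserved; RESERVE_FREE drops
 * the reservation again.  The struct and field names are simplified
 * stand-ins (no locking, no ->ro handling); compiled out with #if 0.
 */
#if 0
#include <stdio.h>

typedef unsigned long long u64;

enum { RESERVE_FREE = 0, RESERVE_ALLOC = 1, RESERVE_ALLOC_NO_ACCOUNT = 2 };

struct sinfo { u64 may_use; u64 reserved; };

static void update_reserved(struct sinfo *s, u64 bytes, int reserve)
{
	if (reserve != RESERVE_FREE) {
		s->reserved += bytes;
		if (reserve == RESERVE_ALLOC)
			s->may_use -= bytes;	/* accounted reservation */
	} else {
		s->reserved -= bytes;
	}
}

int main(void)
{
	struct sinfo s = { .may_use = 8192, .reserved = 0 };

	update_reserved(&s, 4096, RESERVE_ALLOC);
	printf("may_use=%llu reserved=%llu\n", s.may_use, s.reserved);
	update_reserved(&s, 4096, RESERVE_FREE);
	printf("may_use=%llu reserved=%llu\n", s.may_use, s.reserved);
	return 0;
}
#endif
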
int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;

	down_write(&fs_info->extent_commit_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->extent_commit_sem);

	update_global_block_rsv(fs_info);
	return 0;
}

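/*
 * Illustration only: the freed_extents[0]/[1] flip above double-buffers
 * pinned ranges, so extents pinned while a commit is in flight land in the
 * other tree and are not unpinned prematurely.  A toy user-space model with
 * invented names (counters instead of extent_io trees); compiled out with
 * #if 0.
 */
#if 0
#include <stdio.h>

/* two generations of pinned ranges; 'pinned' points at the live one */
struct fs { int freed[2]; int *pinned; };

static void prepare_commit(struct fs *f)
{
	/* flip, exactly like pinned_extents above */
	f->pinned = (f->pinned == &f->freed[0]) ? &f->freed[1] : &f->freed[0];
}

int main(void)
{
	struct fs f = { { 0, 0 }, f.freed };

	*f.pinned += 1;		/* pinned during transaction N */
	prepare_commit(&f);	/* commit starts: new pins go elsewhere */
	*f.pinned += 1;		/* pinned during transaction N + 1 */
	printf("gen0=%d gen1=%d\n", f.freed[0], f.freed[1]);	/* 1 1 */
	return 0;
}
#endif
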
static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	u64 len;

	while (start <= end) {
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache);
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			btrfs_add_free_space(cache, start, len);
		}

		start += len;

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		cache->space_info->bytes_pinned -= len;
		if (cache->ro)
			cache->space_info->bytes_readonly += len;
		spin_unlock(&cache->lock);
		spin_unlock(&cache->space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}

int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		if (btrfs_test_opt(root, DISCARD))
			ret = btrfs_discard_extent(root, start,
						   end + 1 - start, NULL);

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		unpin_extent_range(root, start, end);
		cond_resched();
	}

	return 0;
}

static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	ret = lookup_extent_backref(trans, extent_root, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
		if (!found_extent) {
			BUG_ON(iref);
			ret = remove_extent_backref(trans, extent_root, path,
						    NULL, refs_to_drop,
						    is_data);
			BUG_ON(ret);
			btrfs_release_path(path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret) {
				printk(KERN_ERR "umm, got %d back from search"
				       ", was looking for %llu\n", ret,
				       (unsigned long long)bytenr);
				if (ret > 0)
					btrfs_print_leaf(extent_root,
							 path->nodes[0]);
			}
			BUG_ON(ret);
			extent_slot = path->slots[0];
		}
	} else {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
		       "parent %llu root %llu owner %llu offset %llu\n",
		       (unsigned long long)bytenr,
		       (unsigned long long)parent,
		       (unsigned long long)root_objectid,
		       (unsigned long long)owner_objectid,
		       (unsigned long long)owner_offset);
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, extent_root, path,
					     owner_objectid, 0);
		BUG_ON(ret < 0);

		btrfs_release_path(path);
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
			printk(KERN_ERR "umm, got %d back from search"
			       ", was looking for %llu\n", ret,
			       (unsigned long long)bytenr);
			btrfs_print_leaf(extent_root, path->nodes[0]);
		}
		BUG_ON(ret);
		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs < refs_to_drop);
	refs -= refs_to_drop;

	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
						    iref, refs_to_drop,
						    is_data);
			BUG_ON(ret);
		}
	} else {
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(root, path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		BUG_ON(ret);
		btrfs_release_path(path);

		if (is_data) {
			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
			BUG_ON(ret);
		} else {
			invalidate_mapping_pages(info->btree_inode->i_mapping,
			     bytenr >> PAGE_CACHE_SHIFT,
			     (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
		}

		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	return ret;
}

/*
 * when we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well.  This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	/* there are still entries for this ref, we can't drop it */
	if (ref->bytenr == bytenr)
		goto out;

	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
		kfree(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock.  If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries.  Go
	 * ahead and process it.
	 */
	head->node.in_tree = 0;
	rb_erase(&head->node.rb_node, &delayed_refs->root);

	delayed_refs->num_entries--;

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (list_empty(&head->cluster))
		delayed_refs->num_heads_ready--;

	list_del_init(&head->cluster);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref(&head->node);
	return ret;
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}

void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	struct btrfs_block_group_cache *cache = NULL;
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
						 parent, root->root_key.objectid,
						 btrfs_header_level(buf),
						 BTRFS_DROP_DELAYED_REF, NULL);
		BUG_ON(ret);
	}

	if (!last_ref)
		return;

	cache = btrfs_lookup_block_group(root->fs_info, buf->start);

	if (btrfs_header_generation(buf) == trans->transid) {
		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, root, buf->start);
			if (!ret)
				goto out;
		}

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(root, cache, buf->start, buf->len, 1);
			goto out;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
	}
out:
	/*
	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
	 * anymore.
	 */
	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
	btrfs_put_block_group(cache);
}

int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent,
		      u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(root, bytenr, num_bytes, 1);
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_DROP_DELAYED_REF, NULL);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner,
					offset, BTRFS_DROP_DELAYED_REF, NULL);
		BUG_ON(ret);
	}
	return ret;
}

static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}

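/*
 * Illustration only: stripe_align() is the classic round-up-to-power-of-two
 * trick.  It only works when the stripe size is a power of two, which is
 * assumed here.  Compiled out with #if 0.
 */
#if 0
#include <stdio.h>

typedef unsigned long long u64;

static u64 align_up(u64 val, u64 stripesize)
{
	u64 mask = stripesize - 1;	/* assumes stripesize == 2^n */

	return (val + mask) & ~mask;
}

int main(void)
{
	printf("%llu\n", align_up(65537, 65536));	/* 131072 */
	printf("%llu\n", align_up(65536, 65536));	/* 65536, already aligned */
	return 0;
}
#endif
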
/*
 * when we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once.  So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes.  Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
 */
static noinline int
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;
	DEFINE_WAIT(wait);

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	put_caching_control(caching_ctl);
	return 0;
}

static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;
	DEFINE_WAIT(wait);

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache));

	put_caching_control(caching_ctl);
	return 0;
}

static int get_block_group_index(struct btrfs_block_group_cache *cache)
{
	int index;

	if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
		index = 0;
	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
		index = 1;
	else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
		index = 2;
	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
		index = 3;
	else
		index = 4;
	return index;
}

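/*
 * Illustration only: the index returned above selects one of the per-raid
 * lists in space_info->block_groups[], and find_free_extent() bumps the
 * index when a pass finds nothing, so block groups are tried in this fixed
 * preference order.  Names below are invented for the sketch; compiled out
 * with #if 0.
 */
#if 0
#include <stdio.h>

enum { IDX_RAID10, IDX_RAID1, IDX_DUP, IDX_RAID0, IDX_SINGLE, NR_IDX };

int main(void)
{
	static const char *name[NR_IDX] = {
		"raid10", "raid1", "dup", "raid0", "single"
	};
	int index;

	for (index = 0; index < NR_IDX; index++)
		printf("pass %d searches %s block groups\n",
		       index, name[index]);
	return 0;
}
#endif
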
enum btrfs_loop_type {
	LOOP_FIND_IDEAL = 0,
	LOOP_CACHING_NOWAIT = 1,
	LOOP_CACHING_WAIT = 2,
	LOOP_ALLOC_CHUNK = 3,
	LOOP_NO_EMPTY_SIZE = 4,
};

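/*
 * Illustration only: the loop types above are an escalation ladder, retried
 * in order by find_free_extent() until an allocation succeeds or
 * LOOP_NO_EMPTY_SIZE fails.  The summary strings are one reading of the
 * code below, not authoritative documentation; compiled out with #if 0.
 */
#if 0
#include <stdio.h>

enum loop_type { FIND_IDEAL, CACHING_NOWAIT, CACHING_WAIT,
		 ALLOC_CHUNK, NO_EMPTY_SIZE };

int main(void)
{
	static const char *strategy[] = {
		"probe cached groups, pick an ideal group to cache",
		"search partially cached groups, kick caching, don't wait",
		"search everything, wait on caching progress",
		"force a chunk allocation, then search again",
		"drop empty_size/empty_cluster and make a final pass",
	};
	int loop;

	for (loop = FIND_IDEAL; loop <= NO_EMPTY_SIZE; loop++)
		printf("loop %d: %s\n", loop, strategy[loop]);
	return 0;
}
#endif
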
/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static noinline int find_free_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 search_start, u64 search_end,
				     u64 hint_byte, struct btrfs_key *ins,
				     u64 data)
{
	int ret = 0;
	struct btrfs_root *root = orig_root->fs_info->extent_root;
	struct btrfs_free_cluster *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group = NULL;
	struct btrfs_block_group_cache *used_block_group;
	int empty_cluster = 2 * 1024 * 1024;
	int allowed_chunk_alloc = 0;
	int done_chunk_alloc = 0;
	struct btrfs_space_info *space_info;
	int loop = 0;
	int index = 0;
	int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
	bool found_uncached_bg = false;
	bool failed_cluster_refill = false;
	bool failed_alloc = false;
	bool use_cluster = true;
	bool have_caching_bg = false;
	u64 ideal_cache_percent = 0;
	u64 ideal_cache_offset = 0;

	WARN_ON(num_bytes < root->sectorsize);
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
	ins->objectid = 0;
	ins->offset = 0;

	space_info = __find_space_info(root->fs_info, data);
	if (!space_info) {
		printk(KERN_ERR "No space info for %llu\n", data);
		return -ENOSPC;
	}

	/*
	 * If the space info is for both data and metadata it means we have a
	 * small filesystem and we can't use the clustering stuff.
	 */
	if (btrfs_mixed_space_info(space_info))
		use_cluster = false;

	if (orig_root->ref_cows || empty_size)
		allowed_chunk_alloc = 1;

	if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
		last_ptr = &root->fs_info->meta_alloc_cluster;
		if (!btrfs_test_opt(root, SSD))
			empty_cluster = 64 * 1024;
	}

	if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
	    btrfs_test_opt(root, SSD)) {
		last_ptr = &root->fs_info->data_alloc_cluster;
	}

	if (last_ptr) {
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		spin_unlock(&last_ptr->lock);
	}

	search_start = max(search_start, first_logical_byte(root, 0));
	search_start = max(search_start, hint_byte);

	if (!last_ptr)
		empty_cluster = 0;
	if (search_start == hint_byte) {
ideal_cache:
		block_group = btrfs_lookup_block_group(root->fs_info,
						       search_start);
		used_block_group = block_group;
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if it's not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, data) &&
		    (block_group->cached != BTRFS_CACHE_NO ||
		     search_start == ideal_cache_offset)) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				index = get_block_group_index(block_group);
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}
search:
	have_caching_bg = false;
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group, &space_info->block_groups[index],
			    list) {
		u64 offset;
		int cached;

		used_block_group = block_group;
		btrfs_get_block_group(block_group);
		search_start = block_group->key.objectid;

		/*
		 * this can happen if we end up cycling through all the
		 * raid types, but we want to make sure we only allocate
		 * for the proper type.
		 */
		if (!block_group_bits(block_group, data)) {
			u64 extra = BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10;

			/*
			 * if they asked for extra copies and this block group
			 * doesn't provide them, bail.  This does allow us to
			 * fill raid0 from raid1.
			 */
			if ((data & extra) && !(block_group->flags & extra))
				goto loop;
		}

have_block_group:
		cached = block_group_cache_done(block_group);
		if (unlikely(!cached)) {
			u64 free_percent;

			found_uncached_bg = true;
			ret = cache_block_group(block_group, trans,
						orig_root, 1);
			if (block_group->cached == BTRFS_CACHE_FINISHED)
				goto alloc;

			free_percent = btrfs_block_group_used(&block_group->item);
			free_percent *= 100;
			free_percent = div64_u64(free_percent,
						 block_group->key.offset);
			free_percent = 100 - free_percent;
			if (free_percent > ideal_cache_percent &&
			    likely(!block_group->ro)) {
				ideal_cache_offset = block_group->key.objectid;
				ideal_cache_percent = free_percent;
			}

			/*
			 * The caching workers are limited to 2 threads, so we
			 * can queue as much work as we care to.
			 */
			if (loop > LOOP_FIND_IDEAL) {
				ret = cache_block_group(block_group, trans,
							orig_root, 0);
				BUG_ON(ret);
			}

			/*
			 * If loop is set for cached only, try the next block
			 * group.
			 */
			if (loop == LOOP_FIND_IDEAL)
				goto loop;
		}
alloc:
		if (unlikely(block_group->ro))
			goto loop;

		spin_lock(&block_group->free_space_ctl->tree_lock);
		if (cached &&
		    block_group->free_space_ctl->free_space <
		    num_bytes + empty_cluster + empty_size) {
			spin_unlock(&block_group->free_space_ctl->tree_lock);
			goto loop;
		}
		spin_unlock(&block_group->free_space_ctl->tree_lock);

		/*
		 * Ok we want to try and use the cluster allocator, so
		 * let's look there
		 */
		if (last_ptr) {
			/*
			 * the refill lock keeps out other
			 * people trying to start a new cluster
			 */
			spin_lock(&last_ptr->refill_lock);
			used_block_group = last_ptr->block_group;
			if (used_block_group != block_group &&
			    (!used_block_group ||
			     used_block_group->ro ||
			     !block_group_bits(used_block_group, data))) {
				used_block_group = block_group;
				goto refill_cluster;
			}

			if (used_block_group != block_group)
				btrfs_get_block_group(used_block_group);

			offset = btrfs_alloc_from_cluster(used_block_group,
			    last_ptr, num_bytes, used_block_group->key.objectid);
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
				goto checks;
			}

			WARN_ON(last_ptr->block_group != used_block_group);
			if (used_block_group != block_group) {
				btrfs_put_block_group(used_block_group);
				used_block_group = block_group;
			}
refill_cluster:
			BUG_ON(used_block_group != block_group);
			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
			 * set up a new cluster, so let's just skip it
			 * and let the allocator find whatever block
			 * it can find.  If we reach this point, we
			 * will have tried the cluster allocator
			 * plenty of times and not have found
			 * anything, so we are likely way too
			 * fragmented for the clustering stuff to find
			 * anything.  */
			if (loop >= LOOP_NO_EMPTY_SIZE) {
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}

			/*
			 * this cluster didn't work out, free it and
			 * start over
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);

			/* allocate a cluster in this block group */
			ret = btrfs_find_space_cluster(trans, root,
					       block_group, last_ptr,
					       search_start, num_bytes,
					       empty_cluster + empty_size);
			if (ret == 0) {
				/*
				 * now pull our allocation out of this
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
						  last_ptr, num_bytes,
						  search_start);
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
					goto checks;
				}
			} else if (!cached && loop > LOOP_CACHING_NOWAIT
				   && !failed_cluster_refill) {
				spin_unlock(&last_ptr->refill_lock);

				failed_cluster_refill = true;
				wait_block_group_cache_progress(block_group,
				       num_bytes + empty_cluster + empty_size);
				goto have_block_group;
			}

			/*
			 * at this point we either didn't find a cluster
			 * or we weren't able to allocate a block from our
			 * cluster.  Free the cluster we've been trying
			 * to use, and go to the next block group
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);
			spin_unlock(&last_ptr->refill_lock);
			goto loop;
		}
unclustered_alloc:
		offset = btrfs_find_space_for_alloc(block_group, search_start,
						    num_bytes, empty_size);
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle of
		 * caching and we are ok with waiting, then go ahead and wait
		 * for progress to be made, and set failed_alloc to true.
		 *
		 * If failed_alloc is true then we've already waited on this
		 * block group once and should move on to the next block group.
		 */
		if (!offset && !failed_alloc && !cached &&
		    loop > LOOP_CACHING_NOWAIT) {
			wait_block_group_cache_progress(block_group,
						num_bytes + empty_size);
			failed_alloc = true;
			goto have_block_group;
		} else if (!offset) {
			if (!cached)
				have_caching_bg = true;
			goto loop;
		}
checks:
		search_start = stripe_align(root, offset);
		/* move on to the next group */
		if (search_start + num_bytes >= search_end) {
			btrfs_add_free_space(used_block_group, offset, num_bytes);
			goto loop;
		}

		/* move on to the next group */
		if (search_start + num_bytes >
		    used_block_group->key.objectid + used_block_group->key.offset) {
			btrfs_add_free_space(used_block_group, offset, num_bytes);
			goto loop;
		}

		ins->objectid = search_start;
		ins->offset = num_bytes;

		if (offset < search_start)
			btrfs_add_free_space(used_block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);

		ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
						  alloc_type);
		if (ret == -EAGAIN) {
			btrfs_add_free_space(used_block_group, offset, num_bytes);
			goto loop;
		}

		/* we are all good, let's return */
		ins->objectid = search_start;
		ins->offset = num_bytes;

		if (offset < search_start)
			btrfs_add_free_space(used_block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);
		if (used_block_group != block_group)
			btrfs_put_block_group(used_block_group);
		btrfs_put_block_group(block_group);
		break;
loop:
		failed_cluster_refill = false;
		failed_alloc = false;
		BUG_ON(index != get_block_group_index(block_group));
		if (used_block_group != block_group)
			btrfs_put_block_group(used_block_group);
		btrfs_put_block_group(block_group);
	}
	up_read(&space_info->groups_sem);
	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
		goto search;

	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
		goto search;

	/* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
	 *			for them to make caching progress.  Also
	 *			determine the best possible bg to cache
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *			again
	 */
	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
		index = 0;
		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
			found_uncached_bg = false;
			loop++;
			if (!ideal_cache_percent)
				goto search;

			/*
			 * 1 of the following 2 things have happened so far
			 *
			 * 1) We found an ideal block group for caching that
			 * is mostly full and will cache quickly, so we might
			 * as well wait for it.
			 *
			 * 2) We searched for cached only and we didn't find
			 * anything, and we didn't start any caching kthreads
			 * either, so chances are we will loop through and
			 * start a couple caching kthreads, and then come back
			 * around and just wait for them.  This will be slower
			 * because we will have 2 caching kthreads reading at
			 * the same time when we could have just started one
			 * and waited for it to get far enough to give us an
			 * allocation, so go ahead and go to the wait caching
			 * loop.
			 */
			loop = LOOP_CACHING_WAIT;
			search_start = ideal_cache_offset;
			ideal_cache_percent = 0;
			goto ideal_cache;
		} else if (loop == LOOP_FIND_IDEAL) {
			/*
			 * Didn't find an uncached bg, wait on anything we find
			 * next.
			 */
			loop = LOOP_CACHING_WAIT;
			goto search;
		}

		loop++;

		if (loop == LOOP_ALLOC_CHUNK) {
			if (allowed_chunk_alloc) {
				ret = do_chunk_alloc(trans, root, num_bytes +
						     2 * 1024 * 1024, data,
						     CHUNK_ALLOC_LIMITED);
				allowed_chunk_alloc = 0;
				if (ret == 1)
					done_chunk_alloc = 1;
			} else if (!done_chunk_alloc &&
				   space_info->force_alloc ==
				   CHUNK_ALLOC_NO_FORCE) {
				space_info->force_alloc = CHUNK_ALLOC_LIMITED;
			}

			/*
			 * We didn't allocate a chunk, go ahead and drop the
			 * empty size and loop again.
			 */
			if (!done_chunk_alloc)
				loop = LOOP_NO_EMPTY_SIZE;
		}

		if (loop == LOOP_NO_EMPTY_SIZE) {
			empty_size = 0;
			empty_cluster = 0;
		}

		goto search;
	} else if (!ins->objectid) {
		ret = -ENOSPC;
	} else if (ins->objectid) {
		ret = 0;
	}

	return ret;
}

static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups)
{
	struct btrfs_block_group_cache *cache;
	int index = 0;

	spin_lock(&info->lock);
	printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
	       (unsigned long long)info->flags,
	       (unsigned long long)(info->total_bytes - info->bytes_used -
				    info->bytes_pinned - info->bytes_reserved -
				    info->bytes_readonly),
	       (info->full) ? "" : "not ");
	printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
	       (unsigned long long)info->total_bytes,
	       (unsigned long long)info->bytes_used,
	       (unsigned long long)info->bytes_pinned,
	       (unsigned long long)info->bytes_reserved,
	       (unsigned long long)info->bytes_may_use,
	       (unsigned long long)info->bytes_readonly);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
		       "%llu pinned %llu reserved\n",
		       (unsigned long long)cache->key.objectid,
		       (unsigned long long)cache->key.offset,
		       (unsigned long long)btrfs_block_group_used(&cache->item),
		       (unsigned long long)cache->pinned,
		       (unsigned long long)cache->reserved);
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 u64 search_end, struct btrfs_key *ins,
			 u64 data)
{
	int ret;
	u64 search_start = 0;

	data = btrfs_get_alloc_profile(root, data);
again:
	/*
	 * the only place that sets empty_size is btrfs_realloc_node, which
	 * is not called recursively on allocations
	 */
	if (empty_size || root->ref_cows)
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     num_bytes + 2 * 1024 * 1024, data,
				     CHUNK_ALLOC_NO_FORCE);

	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(trans, root, num_bytes, empty_size,
			       search_start, search_end, hint_byte,
			       ins, data);

	if (ret == -ENOSPC && num_bytes > min_alloc_size) {
		num_bytes = num_bytes >> 1;
		num_bytes = num_bytes & ~(root->sectorsize - 1);
		num_bytes = max(num_bytes, min_alloc_size);
		do_chunk_alloc(trans, root->fs_info->extent_root,
			       num_bytes, data, CHUNK_ALLOC_FORCE);
		goto again;
	}
	if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
		struct btrfs_space_info *sinfo;

		sinfo = __find_space_info(root->fs_info, data);
		printk(KERN_ERR "btrfs allocation failed flags %llu, "
		       "wanted %llu\n", (unsigned long long)data,
		       (unsigned long long)num_bytes);
		dump_space_info(sinfo, num_bytes, 1);
	}

	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);

	return ret;
}

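/*
 * Illustration only: on ENOSPC the function above halves the request, keeps
 * it sector aligned and clamps it to min_alloc_size before retrying.  A
 * user-space sketch of just that arithmetic, with invented helper names;
 * compiled out with #if 0.
 */
#if 0
#include <stdio.h>

typedef unsigned long long u64;

static u64 shrink_request(u64 num_bytes, u64 min_alloc_size, u64 sectorsize)
{
	num_bytes >>= 1;			/* try half as much */
	num_bytes &= ~(sectorsize - 1);		/* stay sector aligned */
	return num_bytes > min_alloc_size ? num_bytes : min_alloc_size;
}

int main(void)
{
	u64 want = 12 * 4096 + 1234;

	/* models repeated -ENOSPC: prints 24576, 12288, 4096 */
	while (want > 4096) {
		want = shrink_request(want, 4096, 4096);
		printf("retrying with %llu bytes\n", want);
	}
	return 0;
}
#endif
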
static int __btrfs_free_reserved_extent(struct btrfs_root *root,
					u64 start, u64 len, int pin)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
		printk(KERN_ERR "Unable to find block group for %llu\n",
		       (unsigned long long)start);
		return -ENOSPC;
	}

	if (btrfs_test_opt(root, DISCARD))
		ret = btrfs_discard_extent(root, start, len, NULL);

	if (pin)
		pin_down_extent(root, cache, start, len, 1);
	else {
		btrfs_add_free_space(cache, start, len);
		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
	}
	btrfs_put_block_group(cache);

	trace_btrfs_reserved_extent_free(root, start, len);

	return ret;
}

int btrfs_free_reserved_extent(struct btrfs_root *root,
			       u64 start, u64 len)
{
	return __btrfs_free_reserved_extent(root, start, len, 0);
}

int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
				       u64 start, u64 len)
{
	return __btrfs_free_reserved_extent(root, start, len, 1);
}

static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;

	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
	if (ret) {
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
	return ret;
}

static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
	block_info = (struct btrfs_tree_block_info *)(extent_item + 1);

	btrfs_set_tree_block_key(leaf, block_info, key);
	btrfs_set_tree_block_level(leaf, block_info, level);

	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	if (parent > 0) {
		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
	if (ret) {
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
	return ret;
}

int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 root_objectid, u64 owner,
				     u64 offset, struct btrfs_key *ins)
{
	int ret;

	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);

	ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
					 0, root_objectid, owner, offset,
					 BTRFS_ADD_DELAYED_EXTENT, NULL);
	return ret;
}

/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;
	u64 start = ins->objectid;
	u64 num_bytes = ins->offset;

	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
	cache_block_group(block_group, trans, NULL, 0);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
		BUG_ON(ret);
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = add_excluded_extent(root, start, num_bytes);
			BUG_ON(ret);
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			BUG_ON(ret);
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			BUG_ON(ret);

			start = caching_ctl->progress;
			num_bytes = ins->objectid + ins->offset -
				    caching_ctl->progress;
			ret = add_excluded_extent(root, start, num_bytes);
			BUG_ON(ret);
		}

		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}

	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
					  RESERVE_ALLOC_NO_ACCOUNT);
	BUG_ON(ret);
	btrfs_put_block_group(block_group);
	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
					 0, owner, offset, ins, 1);
	return ret;
}

struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u64 bytenr, u32 blocksize,
					    int level)
{
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root, buf);

	btrfs_set_lock_blocking(buf);
	btrfs_set_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		/*
		 * we allow two log transactions at a time, use different
		 * EXTENT bits to differentiate dirty pages.
		 */
		if (root->log_transid % 2 == 0)
			set_extent_dirty(&root->dirty_log_pages, buf->start,
					 buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
				       buf->start + buf->len - 1, GFP_NOFS);
	} else {
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->blocks_used++;
	/* this returns a buffer locked for blocking */
	return buf;
}

static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
	      struct btrfs_root *root, u32 blocksize)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	int ret;

	block_rsv = get_block_rsv(trans, root);

	if (block_rsv->size == 0) {
		ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
		/*
		 * If we couldn't reserve metadata bytes try and use some from
		 * the global reserve.
		 */
		if (ret && block_rsv != global_rsv) {
			ret = block_rsv_use_bytes(global_rsv, blocksize);
			if (!ret)
				return global_rsv;
			return ERR_PTR(ret);
		} else if (ret) {
			return ERR_PTR(ret);
		}
		return block_rsv;
	}

	ret = block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;
	if (ret) {
		static DEFINE_RATELIMIT_STATE(_rs,
				DEFAULT_RATELIMIT_INTERVAL,
				/*DEFAULT_RATELIMIT_BURST*/ 2);
		if (__ratelimit(&_rs)) {
			printk(KERN_DEBUG "btrfs: block rsv returned %d\n", ret);
			WARN_ON(1);
		}
		ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
		if (!ret) {
			return block_rsv;
		} else if (ret && block_rsv != global_rsv) {
			ret = block_rsv_use_bytes(global_rsv, blocksize);
			if (!ret)
				return global_rsv;
		}
	}

	return ERR_PTR(-ENOSPC);
}

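/*
 * Illustration only: a user-space sketch of the fallback order in
 * use_block_rsv() above: consume from the root's own block reservation
 * first, then fall back to the global reserve, and only then fail.  Structs
 * and names are invented for the example; compiled out with #if 0.
 */
#if 0
#include <stdio.h>
#include <errno.h>

typedef unsigned long long u64;

struct rsv { u64 free; };

static int rsv_use(struct rsv *r, u64 bytes)
{
	if (r->free < bytes)
		return -ENOSPC;
	r->free -= bytes;
	return 0;
}

static struct rsv *pick_rsv(struct rsv *local, struct rsv *global, u64 bytes)
{
	if (!rsv_use(local, bytes))
		return local;
	if (!rsv_use(global, bytes))
		return global;
	return NULL;	/* the real code returns ERR_PTR(-ENOSPC) */
}

int main(void)
{
	struct rsv local = { 0 }, global = { 16384 };
	struct rsv *got = pick_rsv(&local, &global, 4096);

	printf("used %s rsv, global now %llu\n",
	       got == &global ? "global" : "local", global.free);
	return 0;
}
#endif
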
static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
	block_rsv_add_bytes(block_rsv, blocksize, 0);
	block_rsv_release_bytes(block_rsv, NULL, 0);
}

/*
 * finds a free extent and does all the dirty work required for allocation.
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns the tree buffer or NULL.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u32 blocksize,
					u64 parent, u64 root_objectid,
					struct btrfs_disk_key *key, int level,
					u64 hint, u64 empty_size)
{
	struct btrfs_key ins;
	struct btrfs_block_rsv *block_rsv;
	struct extent_buffer *buf;
	u64 flags = 0;
	int ret;

	block_rsv = use_block_rsv(trans, root, blocksize);
	if (IS_ERR(block_rsv))
		return ERR_CAST(block_rsv);

	ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
				   empty_size, hint, (u64)-1, &ins, 0);
	if (ret) {
		unuse_block_rsv(block_rsv, blocksize);
		return ERR_PTR(ret);
	}

	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
				    blocksize, level);
	BUG_ON(IS_ERR(buf));

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins.objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		struct btrfs_delayed_extent_op *extent_op;
		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
		BUG_ON(!extent_op);
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		extent_op->update_key = 1;
		extent_op->update_flags = 1;
		extent_op->is_data = 0;

		ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
					ins.offset, parent, root_objectid,
					level, BTRFS_ADD_DELAYED_EXTENT,
					extent_op);
		BUG_ON(ret);
	}
	return buf;
}

struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
	int reada_slot;
	int reada_count;
};

#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2

static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u32 nritems;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(root));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);
	blocksize = btrfs_level_size(root, wc->level - 1);

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
					       &refs, &flags);
		BUG_ON(ret);
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		ret = readahead_tree_block(root, bytenr, blocksize,
					   generation);
		if (ret)
			break;
		nread++;
	}
	wc->reada_slot = slot;
}


/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
                   struct btrfs_root *root,
                   struct btrfs_path *path,
                   struct walk_control *wc, int lookup_info)
{
    int level = wc->level;
    struct extent_buffer *eb = path->nodes[level];
    u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
    int ret;

    if (wc->stage == UPDATE_BACKREF &&
        btrfs_header_owner(eb) != root->root_key.objectid)
        return 1;

    /*
     * when reference count of tree block is 1, it won't increase
     * again. once full backref flag is set, we never clear it.
     */
    if (lookup_info &&
        ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
         (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
        BUG_ON(!path->locks[level]);
        ret = btrfs_lookup_extent_info(trans, root,
                           eb->start, eb->len,
                           &wc->refs[level],
                           &wc->flags[level]);
        BUG_ON(ret);
        BUG_ON(wc->refs[level] == 0);
    }

    if (wc->stage == DROP_REFERENCE) {
        if (wc->refs[level] > 1)
            return 1;

        if (path->locks[level] && !wc->keep_locks) {
            btrfs_tree_unlock_rw(eb, path->locks[level]);
            path->locks[level] = 0;
        }
        return 0;
    }

    /* wc->stage == UPDATE_BACKREF */
    if (!(wc->flags[level] & flag)) {
        BUG_ON(!path->locks[level]);
        ret = btrfs_inc_ref(trans, root, eb, 1);
        BUG_ON(ret);
        ret = btrfs_dec_ref(trans, root, eb, 0);
        BUG_ON(ret);
        ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
                          eb->len, flag, 0);
        BUG_ON(ret);
        wc->flags[level] |= flag;
    }

    /*
     * the block is shared by multiple trees, so it's not good to
     * keep the tree lock
     */
    if (path->locks[level] && level > 0) {
        btrfs_tree_unlock_rw(eb, path->locks[level]);
        path->locks[level] = 0;
    }
    return 0;
}

/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to. if the block
 * is shared and we need to update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update back refs, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
                 struct btrfs_root *root,
                 struct btrfs_path *path,
                 struct walk_control *wc, int *lookup_info)
{
    u64 bytenr;
    u64 generation;
    u64 parent;
    u32 blocksize;
    struct btrfs_key key;
    struct extent_buffer *next;
    int level = wc->level;
    int reada = 0;
    int ret = 0;

    generation = btrfs_node_ptr_generation(path->nodes[level],
                           path->slots[level]);
    /*
     * if the lower level block was created before the snapshot
     * was created, we know there is no need to update back refs
     * for the subtree
     */
    if (wc->stage == UPDATE_BACKREF &&
        generation <= root->root_key.offset) {
        *lookup_info = 1;
        return 1;
    }

    bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
    blocksize = btrfs_level_size(root, level - 1);

    next = btrfs_find_tree_block(root, bytenr, blocksize);
    if (!next) {
        next = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!next)
            return -ENOMEM;
        reada = 1;
    }
    btrfs_tree_lock(next);
    btrfs_set_lock_blocking(next);

    ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
                       &wc->refs[level - 1],
                       &wc->flags[level - 1]);
    BUG_ON(ret);
    BUG_ON(wc->refs[level - 1] == 0);
    *lookup_info = 0;

    if (wc->stage == DROP_REFERENCE) {
        if (wc->refs[level - 1] > 1) {
            if (level == 1 &&
                (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
                goto skip;

            if (!wc->update_ref ||
                generation <= root->root_key.offset)
                goto skip;

            btrfs_node_key_to_cpu(path->nodes[level], &key,
                          path->slots[level]);
            ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
            if (ret < 0)
                goto skip;

            wc->stage = UPDATE_BACKREF;
            wc->shared_level = level - 1;
        }
    } else {
        if (level == 1 &&
            (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
            goto skip;
    }

    if (!btrfs_buffer_uptodate(next, generation)) {
        btrfs_tree_unlock(next);
        free_extent_buffer(next);
        next = NULL;
        *lookup_info = 1;
    }

    if (!next) {
        if (reada && level == 1)
            reada_walk_down(trans, root, wc, path);
        next = read_tree_block(root, bytenr, blocksize, generation);
        if (!next)
            return -EIO;
        btrfs_tree_lock(next);
        btrfs_set_lock_blocking(next);
    }

    level--;
    BUG_ON(level != btrfs_header_level(next));
    path->nodes[level] = next;
    path->slots[level] = 0;
    path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
    wc->level = level;
    if (wc->level == 1)
        wc->reada_slot = 0;
    return 0;
skip:
    wc->refs[level - 1] = 0;
    wc->flags[level - 1] = 0;
    if (wc->stage == DROP_REFERENCE) {
        if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
            parent = path->nodes[level]->start;
        } else {
            BUG_ON(root->root_key.objectid !=
                   btrfs_header_owner(path->nodes[level]));
            parent = 0;
        }

        ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
                    root->root_key.objectid, level - 1, 0);
        BUG_ON(ret);
    }
    btrfs_tree_unlock(next);
    free_extent_buffer(next);
    *lookup_info = 1;
    return 1;
}

/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                 struct btrfs_root *root,
                 struct btrfs_path *path,
                 struct walk_control *wc)
{
    int ret;
    int level = wc->level;
    struct extent_buffer *eb = path->nodes[level];
    u64 parent = 0;

    if (wc->stage == UPDATE_BACKREF) {
        BUG_ON(wc->shared_level < level);
        if (level < wc->shared_level)
            goto out;

        ret = find_next_key(path, level + 1, &wc->update_progress);
        if (ret > 0)
            wc->update_ref = 0;

        wc->stage = DROP_REFERENCE;
        wc->shared_level = -1;
        path->slots[level] = 0;

        /*
         * check reference count again if the block isn't locked.
         * we should start walking down the tree again if reference
         * count is one.
         */
        if (!path->locks[level]) {
            BUG_ON(level == 0);
            btrfs_tree_lock(eb);
            btrfs_set_lock_blocking(eb);
            path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

            ret = btrfs_lookup_extent_info(trans, root,
                               eb->start, eb->len,
                               &wc->refs[level],
                               &wc->flags[level]);
            BUG_ON(ret);
            BUG_ON(wc->refs[level] == 0);
            if (wc->refs[level] == 1) {
                btrfs_tree_unlock_rw(eb, path->locks[level]);
                return 1;
            }
        }
    }

    /* wc->stage == DROP_REFERENCE */
    BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

    if (wc->refs[level] == 1) {
        if (level == 0) {
            if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
                ret = btrfs_dec_ref(trans, root, eb, 1);
            else
                ret = btrfs_dec_ref(trans, root, eb, 0);
            BUG_ON(ret);
        }
        /* make block locked assertion in clean_tree_block happy */
        if (!path->locks[level] &&
            btrfs_header_generation(eb) == trans->transid) {
            btrfs_tree_lock(eb);
            btrfs_set_lock_blocking(eb);
            path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
        }
        clean_tree_block(trans, root, eb);
    }

    if (eb == root->node) {
        if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
            parent = eb->start;
        else
            BUG_ON(root->root_key.objectid !=
                   btrfs_header_owner(eb));
    } else {
        if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
            parent = path->nodes[level + 1]->start;
        else
            BUG_ON(root->root_key.objectid !=
                   btrfs_header_owner(path->nodes[level + 1]));
    }

    btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
    wc->refs[level] = 0;
    wc->flags[level] = 0;
    return 0;
}
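
/*
 * Driver for the downward half of the walk: descend from wc->level toward
 * the leaves, letting walk_down_proc() decide whether the current block
 * needs processing and do_walk_down() follow one child pointer.  Returning
 * 0 means we stopped at a leaf or at a block we will not descend into;
 * the caller then switches to walk_up_tree().
 */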
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
                   struct btrfs_root *root,
                   struct btrfs_path *path,
                   struct walk_control *wc)
{
    int level = wc->level;
    int lookup_info = 1;
    int ret;

    while (level >= 0) {
        ret = walk_down_proc(trans, root, path, wc, lookup_info);
        if (ret > 0)
            break;

        if (level == 0)
            break;

        if (path->slots[level] >=
            btrfs_header_nritems(path->nodes[level]))
            break;

        ret = do_walk_down(trans, root, path, wc, &lookup_info);
        if (ret > 0) {
            path->slots[level]++;
            continue;
        } else if (ret < 0)
            return ret;
        level = wc->level;
    }
    return 0;
}

static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
                 struct btrfs_root *root,
                 struct btrfs_path *path,
                 struct walk_control *wc, int max_level)
{
    int level = wc->level;
    int ret;

    path->slots[level] = btrfs_header_nritems(path->nodes[level]);
    while (level < max_level && path->nodes[level]) {
        wc->level = level;
        if (path->slots[level] + 1 <
            btrfs_header_nritems(path->nodes[level])) {
            path->slots[level]++;
            return 0;
        } else {
            ret = walk_up_proc(trans, root, path, wc);
            if (ret > 0)
                return 0;

            if (path->locks[level]) {
                btrfs_tree_unlock_rw(path->nodes[level],
                             path->locks[level]);
                path->locks[level] = 0;
            }
            free_extent_buffer(path->nodes[level]);
            path->nodes[level] = NULL;
            level++;
        }
    }
    return 1;
}

/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are only
 * referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 */
void btrfs_drop_snapshot(struct btrfs_root *root,
             struct btrfs_block_rsv *block_rsv, int update_ref)
{
    struct btrfs_path *path;
    struct btrfs_trans_handle *trans;
    struct btrfs_root *tree_root = root->fs_info->tree_root;
    struct btrfs_root_item *root_item = &root->root_item;
    struct walk_control *wc;
    struct btrfs_key key;
    int err = 0;
    int ret;
    int level;

    path = btrfs_alloc_path();
    if (!path) {
        err = -ENOMEM;
        goto out;
    }

    wc = kzalloc(sizeof(*wc), GFP_NOFS);
    if (!wc) {
        btrfs_free_path(path);
        err = -ENOMEM;
        goto out;
    }

    trans = btrfs_start_transaction(tree_root, 0);
    BUG_ON(IS_ERR(trans));

    if (block_rsv)
        trans->block_rsv = block_rsv;

    if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
        level = btrfs_header_level(root->node);
        path->nodes[level] = btrfs_lock_root_node(root);
        btrfs_set_lock_blocking(path->nodes[level]);
        path->slots[level] = 0;
        path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
        memset(&wc->update_progress, 0,
               sizeof(wc->update_progress));
    } else {
        btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
        memcpy(&wc->update_progress, &key,
               sizeof(wc->update_progress));

        level = root_item->drop_level;
        BUG_ON(level == 0);
        path->lowest_level = level;
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        path->lowest_level = 0;
        if (ret < 0) {
            err = ret;
            goto out_free;
        }
        WARN_ON(ret > 0);

        /*
         * unlock our path, this is safe because only this
         * function is allowed to delete this snapshot
         */
        btrfs_unlock_up_safe(path, 0);

        level = btrfs_header_level(root->node);
        while (1) {
            btrfs_tree_lock(path->nodes[level]);
            btrfs_set_lock_blocking(path->nodes[level]);

            ret = btrfs_lookup_extent_info(trans, root,
                        path->nodes[level]->start,
                        path->nodes[level]->len,
                        &wc->refs[level],
                        &wc->flags[level]);
            BUG_ON(ret);
            BUG_ON(wc->refs[level] == 0);

            if (level == root_item->drop_level)
                break;

            btrfs_tree_unlock(path->nodes[level]);
            WARN_ON(wc->refs[level] != 1);
            level--;
        }
    }

    wc->level = level;
    wc->shared_level = -1;
    wc->stage = DROP_REFERENCE;
    wc->update_ref = update_ref;
    wc->keep_locks = 0;
    wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
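
    /*
     * Alternate walking down and up until walk_up_tree() reports the
     * whole tree has been processed.  drop_progress/drop_level are
     * saved back into the root item before each transaction ends, so
     * an interrupted drop can resume where it left off (see the resume
     * branch above).
     */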
    while (1) {
        ret = walk_down_tree(trans, root, path, wc);
        if (ret < 0) {
            err = ret;
            break;
        }

        ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
        if (ret < 0) {
            err = ret;
            break;
        }

        if (ret > 0) {
            BUG_ON(wc->stage != DROP_REFERENCE);
            break;
        }

        if (wc->stage == DROP_REFERENCE) {
            level = wc->level;
            btrfs_node_key(path->nodes[level],
                       &root_item->drop_progress,
                       path->slots[level]);
            root_item->drop_level = level;
        }

        BUG_ON(wc->level == 0);
        if (btrfs_should_end_transaction(trans, tree_root)) {
            ret = btrfs_update_root(trans, tree_root,
                        &root->root_key,
                        root_item);
            BUG_ON(ret);

            btrfs_end_transaction_throttle(trans, tree_root);
            trans = btrfs_start_transaction(tree_root, 0);
            BUG_ON(IS_ERR(trans));
            if (block_rsv)
                trans->block_rsv = block_rsv;
        }
    }
    btrfs_release_path(path);
    BUG_ON(err);

    ret = btrfs_del_root(trans, tree_root, &root->root_key);
    BUG_ON(ret);

    if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
        ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
                       NULL, NULL);
        BUG_ON(ret < 0);
        if (ret > 0) {
            /* if we fail to delete the orphan item this time
             * around, it'll get picked up the next time.
             *
             * The most common failure here is just -ENOENT.
             */
            btrfs_del_orphan_item(trans, tree_root,
                          root->root_key.objectid);
        }
    }

    if (root->in_radix) {
        btrfs_free_fs_root(tree_root->fs_info, root);
    } else {
        free_extent_buffer(root->node);
        free_extent_buffer(root->commit_root);
        kfree(root);
    }
out_free:
    btrfs_end_transaction_throttle(trans, tree_root);
    kfree(wc);
    btrfs_free_path(path);
out:
    if (err)
        btrfs_std_error(root->fs_info, err);
    return;
}

/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
            struct btrfs_root *root,
            struct extent_buffer *node,
            struct extent_buffer *parent)
{
    struct btrfs_path *path;
    struct walk_control *wc;
    int level;
    int parent_level;
    int ret = 0;
    int wret;

    BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

    path = btrfs_alloc_path();
    if (!path)
        return -ENOMEM;

    wc = kzalloc(sizeof(*wc), GFP_NOFS);
    if (!wc) {
        btrfs_free_path(path);
        return -ENOMEM;
    }

    btrfs_assert_tree_locked(parent);
    parent_level = btrfs_header_level(parent);
    extent_buffer_get(parent);
    path->nodes[parent_level] = parent;
    path->slots[parent_level] = btrfs_header_nritems(parent);

    btrfs_assert_tree_locked(node);
    level = btrfs_header_level(node);
    path->nodes[level] = node;
    path->slots[level] = 0;
    path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

    wc->refs[parent_level] = 1;
    wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
    wc->level = level;
    wc->shared_level = -1;
    wc->stage = DROP_REFERENCE;
    wc->update_ref = 0;
    wc->keep_locks = 1;
    wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

    while (1) {
        wret = walk_down_tree(trans, root, path, wc);
        if (wret < 0) {
            ret = wret;
            break;
        }

        wret = walk_up_tree(trans, root, path, wc, parent_level);
        if (wret < 0)
            ret = wret;
        if (wret != 0)
            break;
    }

    kfree(wc);
    btrfs_free_path(path);
    return ret;
}
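
/*
 * Map a block group's RAID flags to the profile we could actually restripe
 * it to, given the number of currently usable devices.  For example, on a
 * filesystem degraded to a single device, RAID1/RAID10 can only become DUP
 * and RAID0 becomes single-device chunks.
 */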
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
    u64 num_devices;
    u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

    /*
     * we add in the count of missing devices because we want
     * to make sure that any RAID levels on a degraded FS
     * continue to be honored.
     */
    num_devices = root->fs_info->fs_devices->rw_devices +
        root->fs_info->fs_devices->missing_devices;

    if (num_devices == 1) {
        stripped |= BTRFS_BLOCK_GROUP_DUP;
        stripped = flags & ~stripped;

        /* turn raid0 into single device chunks */
        if (flags & BTRFS_BLOCK_GROUP_RAID0)
            return stripped;

        /* turn mirroring into duplication */
        if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
                 BTRFS_BLOCK_GROUP_RAID10))
            return stripped | BTRFS_BLOCK_GROUP_DUP;
        return flags;
    } else {
        /* they already had raid on here, just return */
        if (flags & stripped)
            return flags;

        stripped |= BTRFS_BLOCK_GROUP_DUP;
        stripped = flags & ~stripped;

        /* switch duplicated blocks with raid1 */
        if (flags & BTRFS_BLOCK_GROUP_DUP)
            return stripped | BTRFS_BLOCK_GROUP_RAID1;

        /* turn single device chunks into raid0 */
        return stripped | BTRFS_BLOCK_GROUP_RAID0;
    }
    return flags;
}

static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
{
    struct btrfs_space_info *sinfo = cache->space_info;
    u64 num_bytes;
    u64 min_allocable_bytes;
    int ret = -ENOSPC;

    /*
     * We need some metadata space and system metadata space for
     * allocating chunks in some corner cases, so keep a minimum
     * reserve unless the caller forces the group read-only.
     */
    if ((sinfo->flags &
         (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
        !force)
        min_allocable_bytes = 1 * 1024 * 1024;
    else
        min_allocable_bytes = 0;

    spin_lock(&sinfo->lock);
    spin_lock(&cache->lock);

    if (cache->ro) {
        ret = 0;
        goto out;
    }

    num_bytes = cache->key.offset - cache->reserved - cache->pinned -
            cache->bytes_super - btrfs_block_group_used(&cache->item);

    if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
        sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
        min_allocable_bytes <= sinfo->total_bytes) {
        sinfo->bytes_readonly += num_bytes;
        cache->ro = 1;
        ret = 0;
    }
out:
    spin_unlock(&cache->lock);
    spin_unlock(&sinfo->lock);
    return ret;
}
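
/*
 * Public wrapper around set_block_group_ro(): if the space_info cannot
 * absorb the group's unused bytes right away, force-allocate a new chunk
 * and retry once before giving up.
 */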
int btrfs_set_block_group_ro(struct btrfs_root *root,
                 struct btrfs_block_group_cache *cache)
{
    struct btrfs_trans_handle *trans;
    u64 alloc_flags;
    int ret;

    BUG_ON(cache->ro);

    trans = btrfs_join_transaction(root);
    BUG_ON(IS_ERR(trans));

    alloc_flags = update_block_group_flags(root, cache->flags);
    if (alloc_flags != cache->flags)
        do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
                   CHUNK_ALLOC_FORCE);

    ret = set_block_group_ro(cache, 0);
    if (!ret)
        goto out;
    alloc_flags = get_alloc_profile(root, cache->space_info->flags);
    ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
                 CHUNK_ALLOC_FORCE);
    if (ret < 0)
        goto out;
    ret = set_block_group_ro(cache, 0);
out:
    btrfs_end_transaction(trans, root);
    return ret;
}

int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, u64 type)
{
    u64 alloc_flags = get_alloc_profile(root, type);
    return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
                  CHUNK_ALLOC_FORCE);
}

/*
 * helper to account the unused space of all the readonly block groups in
 * the list. takes mirrors into account.
 */
static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
{
    struct btrfs_block_group_cache *block_group;
    u64 free_bytes = 0;
    int factor;

    list_for_each_entry(block_group, groups_list, list) {
        spin_lock(&block_group->lock);

        if (!block_group->ro) {
            spin_unlock(&block_group->lock);
            continue;
        }

        if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
                      BTRFS_BLOCK_GROUP_RAID10 |
                      BTRFS_BLOCK_GROUP_DUP))
            factor = 2;
        else
            factor = 1;

        free_bytes += (block_group->key.offset -
                   btrfs_block_group_used(&block_group->item)) *
                   factor;

        spin_unlock(&block_group->lock);
    }

    return free_bytes;
}

/*
 * helper to account the unused space of all the readonly block groups in
 * the space_info. takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
    int i;
    u64 free_bytes = 0;

    spin_lock(&sinfo->lock);

    for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
        if (!list_empty(&sinfo->block_groups[i]))
            free_bytes += __btrfs_get_ro_block_group_free_space(
                        &sinfo->block_groups[i]);

    spin_unlock(&sinfo->lock);

    return free_bytes;
}

int btrfs_set_block_group_rw(struct btrfs_root *root,
                 struct btrfs_block_group_cache *cache)
{
    struct btrfs_space_info *sinfo = cache->space_info;
    u64 num_bytes;

    BUG_ON(!cache->ro);

    spin_lock(&sinfo->lock);
    spin_lock(&cache->lock);
    num_bytes = cache->key.offset - cache->reserved - cache->pinned -
            cache->bytes_super - btrfs_block_group_used(&cache->item);
    sinfo->bytes_readonly -= num_bytes;
    cache->ro = 0;
    spin_unlock(&cache->lock);
    spin_unlock(&sinfo->lock);
    return 0;
}

/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if
 * it's ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
{
    struct btrfs_block_group_cache *block_group;
    struct btrfs_space_info *space_info;
    struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
    struct btrfs_device *device;
    u64 min_free;
    u64 dev_min = 1;
    u64 dev_nr = 0;
    int index;
    int full = 0;
    int ret = 0;

    block_group = btrfs_lookup_block_group(root->fs_info, bytenr);

    /* odd, couldn't find the block group, leave it alone */
    if (!block_group)
        return -1;

    min_free = btrfs_block_group_used(&block_group->item);

    /* no bytes used, we're good */
    if (!min_free)
        goto out;

    space_info = block_group->space_info;
    spin_lock(&space_info->lock);

    full = space_info->full;

    /*
     * if this is the last block group we have in this space, we can't
     * relocate it unless we're able to allocate a new chunk below.
     *
     * Otherwise, we need to make sure we have room in the space to handle
     * all of the extents from this block group. If we can, we're good
     */
    if ((space_info->total_bytes != block_group->key.offset) &&
        (space_info->bytes_used + space_info->bytes_reserved +
         space_info->bytes_pinned + space_info->bytes_readonly +
         min_free < space_info->total_bytes)) {
        spin_unlock(&space_info->lock);
        goto out;
    }
    spin_unlock(&space_info->lock);

    /*
     * ok we don't have enough space, but maybe we have free space on our
     * devices to allocate new chunks for relocation, so loop through our
     * alloc devices and guess if we have enough space. However, if we
     * were marked as full, then we know there aren't enough chunks, and we
     * can just return.
     */
    ret = -1;
    if (full)
        goto out;

    /*
     * index:
     *      0: raid10
     *      1: raid1
     *      2: dup
     *      3: raid0
     *      4: single
     */
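    /*
     * Estimate how much free space each device must contribute for this
     * profile: raid10 stripes mirrored data across at least four devices,
     * so each needs roughly half of the used bytes; dup keeps two copies
     * on one device, so the requirement doubles; raid0 spreads the bytes
     * evenly over all writable devices.
     */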
    index = get_block_group_index(block_group);
    if (index == 0) {
        dev_min = 4;
        /* Divide by 2 */
        min_free >>= 1;
    } else if (index == 1) {
        dev_min = 2;
    } else if (index == 2) {
        /* Multiply by 2 */
        min_free <<= 1;
    } else if (index == 3) {
        dev_min = fs_devices->rw_devices;
        do_div(min_free, dev_min);
    }

    mutex_lock(&root->fs_info->chunk_mutex);
    list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
        u64 dev_offset;

        /*
         * check to make sure we can actually find a chunk with enough
         * space to fit our block group in.
         */
        if (device->total_bytes > device->bytes_used + min_free) {
            ret = find_free_dev_extent(NULL, device, min_free,
                           &dev_offset, NULL);
            if (!ret)
                dev_nr++;

            if (dev_nr >= dev_min)
                break;

            ret = -1;
        }
    }
    mutex_unlock(&root->fs_info->chunk_mutex);
out:
    btrfs_put_block_group(block_group);
    return ret;
}

static int find_first_block_group(struct btrfs_root *root,
                  struct btrfs_path *path, struct btrfs_key *key)
{
    int ret = 0;
    struct btrfs_key found_key;
    struct extent_buffer *leaf;
    int slot;

    ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
    if (ret < 0)
        goto out;

    while (1) {
        slot = path->slots[0];
        leaf = path->nodes[0];
        if (slot >= btrfs_header_nritems(leaf)) {
            ret = btrfs_next_leaf(root, path);
            if (ret == 0)
                continue;
            if (ret < 0)
                goto out;
            break;
        }
        btrfs_item_key_to_cpu(leaf, &found_key, slot);

        if (found_key.objectid >= key->objectid &&
            found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
            ret = 0;
            goto out;
        }
        path->slots[0]++;
    }
out:
    return ret;
}

void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
    struct btrfs_block_group_cache *block_group;
    u64 last = 0;

    while (1) {
        struct inode *inode;

        block_group = btrfs_lookup_first_block_group(info, last);
        while (block_group) {
            spin_lock(&block_group->lock);
            if (block_group->iref)
                break;
            spin_unlock(&block_group->lock);
            block_group = next_block_group(info->tree_root,
                               block_group);
        }
        if (!block_group) {
            if (last == 0)
                break;
            last = 0;
            continue;
        }

        inode = block_group->inode;
        block_group->iref = 0;
        block_group->inode = NULL;
        spin_unlock(&block_group->lock);
        iput(inode);
        last = block_group->key.objectid + block_group->key.offset;
        btrfs_put_block_group(block_group);
    }
}

int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
    struct btrfs_block_group_cache *block_group;
    struct btrfs_space_info *space_info;
    struct btrfs_caching_control *caching_ctl;
    struct rb_node *n;

    down_write(&info->extent_commit_sem);
    while (!list_empty(&info->caching_block_groups)) {
        caching_ctl = list_entry(info->caching_block_groups.next,
                     struct btrfs_caching_control, list);
        list_del(&caching_ctl->list);
        put_caching_control(caching_ctl);
    }
    up_write(&info->extent_commit_sem);

    spin_lock(&info->block_group_cache_lock);
    while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
        block_group = rb_entry(n, struct btrfs_block_group_cache,
                       cache_node);
        rb_erase(&block_group->cache_node,
             &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        down_write(&block_group->space_info->groups_sem);
        list_del(&block_group->list);
        up_write(&block_group->space_info->groups_sem);

        if (block_group->cached == BTRFS_CACHE_STARTED)
            wait_block_group_cache_done(block_group);

        /*
         * We haven't cached this block group, which means we could
         * possibly have excluded extents on this block group.
         */
        if (block_group->cached == BTRFS_CACHE_NO)
            free_excluded_extents(info->extent_root, block_group);

        btrfs_remove_free_space_cache(block_group);
        btrfs_put_block_group(block_group);

        spin_lock(&info->block_group_cache_lock);
    }
    spin_unlock(&info->block_group_cache_lock);

    /* now that all the block groups are freed, go through and
     * free all the space_info structs.  This is only called during
     * the final stages of unmount, and so we know nobody is
     * using them.  We call synchronize_rcu() once before we start,
     * just to be on the safe side.
     */
    synchronize_rcu();

    release_global_block_rsv(info);

    while (!list_empty(&info->space_info)) {
        space_info = list_entry(info->space_info.next,
                    struct btrfs_space_info,
                    list);
        if (space_info->bytes_pinned > 0 ||
            space_info->bytes_reserved > 0 ||
            space_info->bytes_may_use > 0) {
            WARN_ON(1);
            dump_space_info(space_info, 0, 0);
        }
        list_del(&space_info->list);
        kfree(space_info);
    }
    return 0;
}

static void __link_block_group(struct btrfs_space_info *space_info,
                   struct btrfs_block_group_cache *cache)
{
    int index = get_block_group_index(cache);

    down_write(&space_info->groups_sem);
    list_add_tail(&cache->list, &space_info->block_groups[index]);
    up_write(&space_info->groups_sem);
}
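
/*
 * Called during mount: scan the extent tree for block group items and
 * build the in-memory block group cache, wiring every group into its
 * space_info.  Groups on read-only chunks, and un-mirrored groups on a
 * filesystem that also has mirrored ones, are forced read-only.
 */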
int btrfs_read_block_groups(struct btrfs_root *root)
{
    struct btrfs_path *path;
    int ret;
    struct btrfs_block_group_cache *cache;
    struct btrfs_fs_info *info = root->fs_info;
    struct btrfs_space_info *space_info;
    struct btrfs_key key;
    struct btrfs_key found_key;
    struct extent_buffer *leaf;
    int need_clear = 0;
    u64 cache_gen;

    root = info->extent_root;
    key.objectid = 0;
    key.offset = 0;
    btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
    path = btrfs_alloc_path();
    if (!path)
        return -ENOMEM;
    path->reada = 1;

    cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
    if (btrfs_test_opt(root, SPACE_CACHE) &&
        btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
        need_clear = 1;
    if (btrfs_test_opt(root, CLEAR_CACHE))
        need_clear = 1;

    while (1) {
        ret = find_first_block_group(root, path, &key);
        if (ret > 0)
            break;
        if (ret != 0)
            goto error;
        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
        cache = kzalloc(sizeof(*cache), GFP_NOFS);
        if (!cache) {
            ret = -ENOMEM;
            goto error;
        }
        cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
                        GFP_NOFS);
        if (!cache->free_space_ctl) {
            kfree(cache);
            ret = -ENOMEM;
            goto error;
        }

        atomic_set(&cache->count, 1);
        spin_lock_init(&cache->lock);
        cache->fs_info = info;
        INIT_LIST_HEAD(&cache->list);
        INIT_LIST_HEAD(&cache->cluster_list);

        if (need_clear)
            cache->disk_cache_state = BTRFS_DC_CLEAR;

        read_extent_buffer(leaf, &cache->item,
                   btrfs_item_ptr_offset(leaf, path->slots[0]),
                   sizeof(cache->item));
        memcpy(&cache->key, &found_key, sizeof(found_key));

        key.objectid = found_key.objectid + found_key.offset;
        btrfs_release_path(path);
        cache->flags = btrfs_block_group_flags(&cache->item);
        cache->sectorsize = root->sectorsize;

        btrfs_init_free_space_ctl(cache);

        /*
         * We need to exclude the super stripes now so that the space
         * info has super bytes accounted for, otherwise we'll think
         * we have more space than we actually do.
         */
        exclude_super_stripes(root, cache);

        /*
         * check for two cases, either we are full, and therefore
         * don't need to bother with the caching work since we won't
         * find any space, or we are empty, and we can just add all
         * the space in and be done with it.  This saves us a lot of
         * time, particularly in the full case.
         */
        if (found_key.offset == btrfs_block_group_used(&cache->item)) {
            cache->last_byte_to_unpin = (u64)-1;
            cache->cached = BTRFS_CACHE_FINISHED;
            free_excluded_extents(root, cache);
        } else if (btrfs_block_group_used(&cache->item) == 0) {
            cache->last_byte_to_unpin = (u64)-1;
            cache->cached = BTRFS_CACHE_FINISHED;
            add_new_free_space(cache, root->fs_info,
                       found_key.objectid,
                       found_key.objectid +
                       found_key.offset);
            free_excluded_extents(root, cache);
        }

        ret = update_space_info(info, cache->flags, found_key.offset,
                    btrfs_block_group_used(&cache->item),
                    &space_info);
        BUG_ON(ret);
        cache->space_info = space_info;
        spin_lock(&cache->space_info->lock);
        cache->space_info->bytes_readonly += cache->bytes_super;
        spin_unlock(&cache->space_info->lock);

        __link_block_group(space_info, cache);

        ret = btrfs_add_block_group_cache(root->fs_info, cache);
        BUG_ON(ret);

        set_avail_alloc_bits(root->fs_info, cache->flags);
        if (btrfs_chunk_readonly(root, cache->key.objectid))
            set_block_group_ro(cache, 1);
    }

    list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
        if (!(get_alloc_profile(root, space_info->flags) &
              (BTRFS_BLOCK_GROUP_RAID10 |
               BTRFS_BLOCK_GROUP_RAID1 |
               BTRFS_BLOCK_GROUP_DUP)))
            continue;
        /*
         * avoid allocating from un-mirrored block group if there are
         * mirrored block groups.
         */
        list_for_each_entry(cache, &space_info->block_groups[3], list)
            set_block_group_ro(cache, 1);
        list_for_each_entry(cache, &space_info->block_groups[4], list)
            set_block_group_ro(cache, 1);
    }

    init_global_block_rsv(info);
    ret = 0;
error:
    btrfs_free_path(path);
    return ret;
}
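
/*
 * Create the in-memory cache entry and the on-disk block group item for a
 * freshly allocated chunk.  The group is created fully cached: the only
 * used bytes are the ones the caller passes in, everything else goes
 * straight into the free space cache.
 */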
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
               struct btrfs_root *root, u64 bytes_used,
               u64 type, u64 chunk_objectid, u64 chunk_offset,
               u64 size)
{
    int ret;
    struct btrfs_root *extent_root;
    struct btrfs_block_group_cache *cache;

    extent_root = root->fs_info->extent_root;

    root->fs_info->last_trans_log_full_commit = trans->transid;

    cache = kzalloc(sizeof(*cache), GFP_NOFS);
    if (!cache)
        return -ENOMEM;
    cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
                    GFP_NOFS);
    if (!cache->free_space_ctl) {
        kfree(cache);
        return -ENOMEM;
    }

    cache->key.objectid = chunk_offset;
    cache->key.offset = size;
    cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
    cache->sectorsize = root->sectorsize;
    cache->fs_info = root->fs_info;

    atomic_set(&cache->count, 1);
    spin_lock_init(&cache->lock);
    INIT_LIST_HEAD(&cache->list);
    INIT_LIST_HEAD(&cache->cluster_list);

    btrfs_init_free_space_ctl(cache);

    btrfs_set_block_group_used(&cache->item, bytes_used);
    btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
    cache->flags = type;
    btrfs_set_block_group_flags(&cache->item, type);

    cache->last_byte_to_unpin = (u64)-1;
    cache->cached = BTRFS_CACHE_FINISHED;
    exclude_super_stripes(root, cache);

    add_new_free_space(cache, root->fs_info, chunk_offset,
               chunk_offset + size);

    free_excluded_extents(root, cache);

    ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
                &cache->space_info);
    BUG_ON(ret);

    spin_lock(&cache->space_info->lock);
    cache->space_info->bytes_readonly += cache->bytes_super;
    spin_unlock(&cache->space_info->lock);

    __link_block_group(cache->space_info, cache);

    ret = btrfs_add_block_group_cache(root->fs_info, cache);
    BUG_ON(ret);

    ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
                sizeof(cache->item));
    BUG_ON(ret);

    set_avail_alloc_bits(extent_root->fs_info, type);

    return 0;
}
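
/*
 * Tear down a block group after its extents have been relocated: drop the
 * free space cache inode, delete the free space and block group items,
 * and unaccount the group from its space_info.  The group must already be
 * read-only.
 */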
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                 struct btrfs_root *root, u64 group_start)
{
    struct btrfs_path *path;
    struct btrfs_block_group_cache *block_group;
    struct btrfs_free_cluster *cluster;
    struct btrfs_root *tree_root = root->fs_info->tree_root;
    struct btrfs_key key;
    struct inode *inode;
    int ret;
    int factor;

    root = root->fs_info->extent_root;

    block_group = btrfs_lookup_block_group(root->fs_info, group_start);
    BUG_ON(!block_group);
    BUG_ON(!block_group->ro);

    /*
     * Free the reserved super bytes from this block group before
     * removing it.
     */
    free_excluded_extents(root, block_group);

    memcpy(&key, &block_group->key, sizeof(key));
    if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
                  BTRFS_BLOCK_GROUP_RAID1 |
                  BTRFS_BLOCK_GROUP_RAID10))
        factor = 2;
    else
        factor = 1;

    /* make sure this block group isn't part of an allocation cluster */
    cluster = &root->fs_info->data_alloc_cluster;
    spin_lock(&cluster->refill_lock);
    btrfs_return_cluster_to_free_space(block_group, cluster);
    spin_unlock(&cluster->refill_lock);

    /*
     * make sure this block group isn't part of a metadata
     * allocation cluster
     */
    cluster = &root->fs_info->meta_alloc_cluster;
    spin_lock(&cluster->refill_lock);
    btrfs_return_cluster_to_free_space(block_group, cluster);
    spin_unlock(&cluster->refill_lock);

    path = btrfs_alloc_path();
    if (!path) {
        ret = -ENOMEM;
        goto out;
    }

    inode = lookup_free_space_inode(tree_root, block_group, path);
    if (!IS_ERR(inode)) {
        ret = btrfs_orphan_add(trans, inode);
        BUG_ON(ret);
        clear_nlink(inode);
        /* One for the block group's ref */
        spin_lock(&block_group->lock);
        if (block_group->iref) {
            block_group->iref = 0;
            block_group->inode = NULL;
            spin_unlock(&block_group->lock);
            iput(inode);
        } else {
            spin_unlock(&block_group->lock);
        }
        /* One for our lookup ref */
        btrfs_add_delayed_iput(inode);
    }

    key.objectid = BTRFS_FREE_SPACE_OBJECTID;
    key.offset = block_group->key.objectid;
    key.type = 0;

    ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
    if (ret < 0)
        goto out;
    if (ret > 0)
        btrfs_release_path(path);
    if (ret == 0) {
        ret = btrfs_del_item(trans, tree_root, path);
        if (ret)
            goto out;
        btrfs_release_path(path);
    }

    spin_lock(&root->fs_info->block_group_cache_lock);
    rb_erase(&block_group->cache_node,
         &root->fs_info->block_group_cache_tree);
    spin_unlock(&root->fs_info->block_group_cache_lock);

    down_write(&block_group->space_info->groups_sem);
    /*
     * we must use list_del_init so people can check to see if they
     * are still on the list after taking the semaphore
     */
    list_del_init(&block_group->list);
    up_write(&block_group->space_info->groups_sem);

    if (block_group->cached == BTRFS_CACHE_STARTED)
        wait_block_group_cache_done(block_group);

    btrfs_remove_free_space_cache(block_group);

    spin_lock(&block_group->space_info->lock);
    block_group->space_info->total_bytes -= block_group->key.offset;
    block_group->space_info->bytes_readonly -= block_group->key.offset;
    block_group->space_info->disk_total -= block_group->key.offset * factor;
    spin_unlock(&block_group->space_info->lock);

    memcpy(&key, &block_group->key, sizeof(key));

    btrfs_clear_space_info_full(root->fs_info);
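
    /*
     * Two references are dropped here: the one taken by the lookup at
     * the top of this function and, presumably, the one that backed the
     * block group cache rbtree entry erased above.
     */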
    btrfs_put_block_group(block_group);
    btrfs_put_block_group(block_group);

    ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
    if (ret > 0)
        ret = -EIO;
    if (ret < 0)
        goto out;

    ret = btrfs_del_item(trans, root, path);
out:
    btrfs_free_path(path);
    return ret;
}
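
/*
 * Create the bare space_info entries (system plus metadata and data, or a
 * single mixed entry) on a filesystem that has no block groups yet, so
 * later chunk allocation always finds a space_info to account against.
 */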
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
    struct btrfs_space_info *space_info;
    struct btrfs_super_block *disk_super;
    u64 features;
    u64 flags;
    int mixed = 0;
    int ret;

    disk_super = fs_info->super_copy;
    if (!btrfs_super_root(disk_super))
        return 1;

    features = btrfs_super_incompat_flags(disk_super);
    if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
        mixed = 1;

    flags = BTRFS_BLOCK_GROUP_SYSTEM;
    ret = update_space_info(fs_info, flags, 0, 0, &space_info);
    if (ret)
        goto out;

    if (mixed) {
        flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
        ret = update_space_info(fs_info, flags, 0, 0, &space_info);
    } else {
        flags = BTRFS_BLOCK_GROUP_METADATA;
        ret = update_space_info(fs_info, flags, 0, 0, &space_info);
        if (ret)
            goto out;

        flags = BTRFS_BLOCK_GROUP_DATA;
        ret = update_space_info(fs_info, flags, 0, 0, &space_info);
    }
out:
    return ret;
}

int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
    return unpin_extent_range(root, start, end);
}

int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
                   u64 num_bytes, u64 *actual_bytes)
{
    return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
}
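
/*
 * FITRIM entry point: walk every block group overlapping
 * [range->start, range->start + range->len), make sure its free space is
 * cached, and discard free ranges at least range->minlen long.
 * range->len is set to the number of bytes actually trimmed.
 */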
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
    struct btrfs_fs_info *fs_info = root->fs_info;
    struct btrfs_block_group_cache *cache = NULL;
    u64 group_trimmed;
    u64 start;
    u64 end;
    u64 trimmed = 0;
    int ret = 0;

    cache = btrfs_lookup_block_group(fs_info, range->start);

    while (cache) {
        if (cache->key.objectid >= (range->start + range->len)) {
            btrfs_put_block_group(cache);
            break;
        }

        start = max(range->start, cache->key.objectid);
        end = min(range->start + range->len,
              cache->key.objectid + cache->key.offset);

        if (end - start >= range->minlen) {
            if (!block_group_cache_done(cache)) {
                ret = cache_block_group(cache, NULL, root, 0);
                if (!ret)
                    wait_block_group_cache_done(cache);
            }
            ret = btrfs_trim_block_group(cache,
                             &group_trimmed,
                             start,
                             end,
                             range->minlen);

            trimmed += group_trimmed;
            if (ret) {
                btrfs_put_block_group(cache);
                break;
            }
        }

        cache = next_block_group(fs_info->tree_root, cache);
    }

    range->len = trimmed;
    return ret;
}