extent-tree.c

/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

#undef SCRAMBLE_DELAYED_REFS
/*
 * control flags for do_chunk_alloc's force field
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated. This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};
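
/*
 * Illustrative sketch only -- this helper is hypothetical and not part of
 * this file: how a caller might pick one of the force levels above. In the
 * real code the decision is made by do_chunk_alloc() and its callers.
 */
static inline int example_chunk_alloc_force(int clustering, int must_alloc)
{
	if (must_alloc)
		return CHUNK_ALLOC_FORCE;	/* always allocate a chunk */
	if (clustering)
		return CHUNK_ALLOC_LIMITED;	/* only if very few chunks exist */
	return CHUNK_ALLOC_NO_FORCE;		/* only if one is really needed */
}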
/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};
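
/*
 * Hypothetical usage sketch (not from this file): choosing the reserve type
 * passed to the forward-declared btrfs_update_reserved_bytes() below.
 * RESERVE_ALLOC also bumps bytes_may_use for ENOSPC accounting, while
 * RESERVE_ALLOC_NO_ACCOUNT is for paths that did that accounting elsewhere.
 */
static inline int example_reserve_type(int already_accounted)
{
	return already_accounted ? RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
}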
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve);
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}
/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;

	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret); /* -ENOMEM */
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret); /* -ENOMEM */

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret); /* -ENOMEM */
		}

		kfree(logical);
	}
	return 0;
}
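
/*
 * Aside (values from the btrfs on-disk format, stated here as background
 * rather than taken from this file): the superblock mirrors walked via
 * btrfs_sb_offset() live at fixed offsets of 64KiB, 64MiB and 256GiB, which
 * is why any block group overlapping those ranges has the corresponding
 * stripes excluded above. A hypothetical dump of the offsets:
 */
static inline void example_dump_sb_offsets(void)
{
	int i;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++)
		printk(KERN_DEBUG "super mirror %d at %llu\n",
		       i, (unsigned long long)btrfs_sb_offset(i));
}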
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}
/*
 * This is only called by cache_block_group. Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * can't be used yet, because their free space will be released as soon as
 * the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
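
/*
 * Worked example (illustrative only): caching the range [0, 100) while a
 * pinned extent covers [30, 50]. The loop adds [0, 30) as free space
 * (size 30), advances start to 51, finds no further pinned extents and
 * breaks, then the tail check adds [51, 100) (size 49), so the function
 * returns total_added == 79.
 */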
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    btrfs_next_leaf(extent_root, path)) {
				caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->extent_commit_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	caching_ctl->work.func = caching_thread;

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info. The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	/*
	 * We can't do the read from on-disk cache during a commit since we need
	 * to have the normal tree locking. Also if we are currently trying to
	 * allocate blocks for the tree root we can't do the fast caching since
	 * we likely hold important locks.
	 */
	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
			}
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching, set cached to the
		 * appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->extent_commit_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

	return ret;
}
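
/*
 * Hypothetical caller (not from this file): make sure a block group's free
 * space is known before allocating from it. Passing load_cache_only == 0
 * falls back to queueing caching_thread() when the on-disk space cache
 * can't be used; the -EAGAIN convention here is an assumption of this
 * sketch, not something this file defines.
 */
static inline int example_ensure_cached(struct btrfs_block_group_cache *cache,
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root)
{
	int ret = cache_block_group(cache, trans, root, 0);

	if (ret)
		return ret;
	/* the fast load may have finished already; otherwise callers wait */
	return block_group_cache_done(cache) ? 0 : -EAGAIN;
}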
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
	if (factor == 100)
		return num;
	num *= factor;
	do_div(num, 100);
	return num;
}
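
/*
 * Worked examples (illustrative): div_factor(1000, 9) == 900, i.e. nine
 * tenths of num, and div_factor_fine(1000, 75) == 750, i.e. 75 percent.
 * do_div() is used instead of a plain '/' because native 64-bit division
 * isn't available on all 32-bit targets.
 */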
u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}
/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}
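
/*
 * Hypothetical usage sketch: btrfs_search_slot() returns 0 on an exact
 * match, > 0 when the key wasn't found, and < 0 on error, and
 * btrfs_lookup_extent() passes that result straight through.
 */
static inline bool example_extent_exists(struct btrfs_root *root,
					 u64 start, u64 len)
{
	return btrfs_lookup_extent(root, start, len) == 0;
}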
  645. /*
  646. * helper function to lookup reference count and flags of extent.
  647. *
  648. * the head node for delayed ref is used to store the sum of all the
  649. * reference count modifications queued up in the rbtree. the head
  650. * node may also store the extent flags to set. This way you can check
  651. * to see what the reference count and extent flags would be if all of
  652. * the delayed refs are not processed.
  653. */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and
			 * try again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
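
/*
 * Illustrative sketch (not part of the original source): a typical
 * read-side use of btrfs_lookup_extent_info().
 *
 *	u64 refs, flags;
 *	int err;
 *
 *	err = btrfs_lookup_extent_info(trans, root, bytenr, num_bytes,
 *				       &refs, &flags);
 *
 * On success, refs and flags reflect the extent as it would look once
 * every delayed ref queued against bytenr has been applied.
 */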
/*
 * Back reference rules. Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance. This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Full back refs are actually generic and can
 * be used in all the cases implicit back refs are used. The major
 * shortcoming of full back refs is their overhead. Every time a tree
 * block gets COWed, we have to update the back ref entries for all the
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back ref conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used;
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required. This information is stored in
 * the tree block info structure.
 */
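
/*
 * Illustrative sketch (not part of the original source): concrete key
 * shapes for the rules above, using hypothetical byte numbers. For a
 * data extent at bytenr 12582912 referenced by inode 257 at file
 * offset 0 in subvolume 5, the implicit back ref is keyed
 *
 *	(12582912, BTRFS_EXTENT_DATA_REF_KEY,
 *	 hash_extent_data_ref(5, 257, 0))
 *
 * while a full back ref from a parent tree block at bytenr 29360128
 * sharing the same extent is keyed
 *
 *	(12582912, BTRFS_SHARED_DATA_REF_KEY, 29360128)
 */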
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(trans, root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
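
/*
 * Note (not part of the original source): the shift above is 31, not
 * 32, so bit 31 of the two crc32c values overlaps in the final hash.
 * The result is persisted on disk as the key offset of implicit back
 * refs, so the quirk must be preserved for compatibility.
 */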
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * look for an inline back ref. if the back ref is found, *ref_ret is
 * set to the address of the inline back ref, and 0 is returned.
 *
 * if the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 * items in the tree are ordered.
 */
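
/*
 * Illustrative sketch (not part of the original source): callers are
 * expected to branch on the three outcomes roughly like
 *
 *	ret = lookup_inline_extent_backref(trans, root, path, &iref, ...);
 *	if (ret == 0)
 *		update the existing ref at iref;
 *	else if (ret == -ENOENT)
 *		insert a new inline ref at iref;
 *	else if (ret == -EAGAIN)
 *		fall back to a separate back ref item;
 *
 * which, together with __btrfs_inc_extent_ref() further down, is the
 * shape the real callers in this file follow.
 */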
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	}
	BUG_ON(ret); /* Corruption */

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block.
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
/*
 * helper to add a new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(trans, root, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}
/*
 * helper to update/remove an inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(trans, root, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(trans, root, path, iref,
					     refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(trans, root, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(trans, root, path, iref,
					     -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}
static int btrfs_issue_discard(struct block_device *bdev,
			       u64 start, u64 len)
{
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}
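
/*
 * Note (not part of the original source): blkdev_issue_discard() takes
 * its start and length in 512-byte sectors, so the shifts above divide
 * byte values by 512; e.g. a 4096-byte extent maps to 8 sectors.
 */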
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
			      bytenr, &num_bytes, &bbio, 0);
	/* Error condition is -ENOMEM */
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			if (!stripe->dev->can_discard)
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length);
			if (!ret)
				discarded_bytes += stripe->length;
			else if (ret != -EOPNOTSUPP)
				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		kfree(bbio);
	}

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	return ret;
}
/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset, int for_cow)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
	}
	return ret;
}
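
/*
 * Illustrative sketch (not part of the original source): when a COW of
 * a shared node adds a reference to a child tree block, a caller could
 * queue the new ref roughly as
 *
 *	ret = btrfs_inc_extent_ref(trans, root, child_bytenr, blocksize,
 *				   parent_bytenr, root->root_key.objectid,
 *				   level - 1, 0, 1);
 *
 * where child_bytenr, parent_bytenr and level are placeholders; the
 * ref is only queued here and applied later by the delayed ref code.
 */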
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;

	if (trans->aborted)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = node->num_bytes;

	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_tree_ref(node);
	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags ||
		       !extent_op->update_key);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret = 0;

	if (trans->aborted)
		return 0;

	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree. But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
			}
		}
		mutex_unlock(&head->mutex);
		return ret;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}
static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents the ref count from going down to zero while
	 * there are still pending delayed refs.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}
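
/*
 * Illustrative sketch (not part of the original source): if a head has
 * the queued modifications (+1, -1, +1) against an on-disk count of 1,
 * running the adds first moves the count 1 -> 2 -> 3 -> 2 instead of
 * 1 -> 0 -> ..., so the extent is never transiently freed while refs
 * are still pending.
 */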
/*
 * Returns the number of refs processed, which is what the code below
 * actually returns via count, or 0 if called with an already aborted
 * transaction. Returns -ENOMEM or -EIO on failure and will abort the
 * transaction.
 */
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head. If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * We need to try and merge add/drops of the same ref since we
		 * can run into issues with relocate dropping the implicit ref
		 * and then it being added back again before the drop can
		 * finish. If we merged anything we need to re-loop so we can
		 * get a good ref.
		 */
		btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
					 locked_ref);

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);

		if (ref && ref->seq &&
		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
			/*
			 * there are still refs with lower seq numbers in the
			 * process of being added. Don't run this ref yet.
			 */
			list_del_init(&locked_ref->cluster);
			mutex_unlock(&locked_ref->mutex);
			locked_ref = NULL;
			delayed_refs->num_heads_ready++;
			spin_unlock(&delayed_refs->lock);
			cond_resched();
			spin_lock(&delayed_refs->lock);
			continue;
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		if (!ref) {
			/* All delayed refs have been processed, Go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				kfree(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				kfree(extent_op);

				if (ret) {
					printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
					spin_lock(&delayed_refs->lock);
					return ret;
				}

				goto next;
			}

			list_del_init(&locked_ref->cluster);
			locked_ref = NULL;
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;
		if (locked_ref) {
			/*
			 * when we play the delayed ref, also correct the
			 * ref_mod on head
			 */
			switch (ref->action) {
			case BTRFS_ADD_DELAYED_REF:
			case BTRFS_ADD_DELAYED_EXTENT:
				locked_ref->node.ref_mod -= ref->ref_mod;
				break;
			case BTRFS_DROP_DELAYED_REF:
				locked_ref->node.ref_mod += ref->ref_mod;
				break;
			default:
				WARN_ON(1);
			}
		}
		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);

		btrfs_put_delayed_ref(ref);
		kfree(extent_op);
		count++;

		if (ret) {
			printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
			spin_lock(&delayed_refs->lock);
			return ret;
		}

next:
		do_chunk_alloc(trans, fs_info->extent_root,
			       2 * 1024 * 1024,
			       btrfs_get_alloc_profile(root, 0),
			       CHUNK_ALLOC_NO_FORCE);
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	return count;
}
#ifdef SCRAMBLE_DELAYED_REFS
/*
 * Normally delayed refs get processed in ascending bytenr order. This
 * correlates in most cases to the order added. To expose dependencies on this
 * order, we start to process the tree in the middle instead of the beginning
 */
static u64 find_middle(struct rb_root *root)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int alt = 1;
	u64 middle;
	u64 first = 0, last = 0;

	n = rb_first(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		first = entry->bytenr;
	}
	n = rb_last(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		last = entry->bytenr;
	}
	n = root->rb_node;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);

		middle = entry->bytenr;

		if (alt)
			n = n->rb_left;
		else
			n = n->rb_right;

		alt = 1 - alt;
	}
	return middle;
}
#endif
int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct qgroup_update *qgroup_update;
	int ret = 0;

	if (list_empty(&trans->qgroup_ref_list) !=
	    !trans->delayed_ref_elem.seq) {
		/* list without seq or seq without list */
		printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
			list_empty(&trans->qgroup_ref_list) ? "" : " not",
			trans->delayed_ref_elem.seq);
		BUG();
	}

	if (!trans->delayed_ref_elem.seq)
		return 0;

	while (!list_empty(&trans->qgroup_ref_list)) {
		qgroup_update = list_first_entry(&trans->qgroup_ref_list,
						 struct qgroup_update, list);
		list_del(&qgroup_update->list);
		if (!ret)
			ret = btrfs_qgroup_account_ref(
					trans, fs_info, qgroup_update->node,
					qgroup_update->extent_op);
		kfree(qgroup_update);
	}

	btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);

	return ret;
}
/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far. count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 *
 * Returns 0 on success or if called with an aborted transaction
 * Returns <0 on error and aborts the transaction
 */
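
/*
 * Illustrative sketch (not part of the original source): a caller that
 * wants every queued update flushed can pass (unsigned long)-1,
 *
 *	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 *
 * which takes the run_all path below, while a bounded call such as
 * btrfs_run_delayed_refs(trans, root, 64) only trims the backlog and
 * a count of 0 processes what was queued at the start of the run.
 */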
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int ret;
	u64 delayed_start;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;
	int loops;

	/* We'll clean this up in btrfs_cleanup_transaction */
	if (trans->aborted)
		return 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	do_chunk_alloc(trans, root->fs_info->extent_root,
		       2 * 1024 * 1024, btrfs_get_alloc_profile(root, 0),
		       CHUNK_ALLOC_NO_FORCE);

	btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);
again:
	loops = 0;
	spin_lock(&delayed_refs->lock);

#ifdef SCRAMBLE_DELAYED_REFS
	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif

	if (count == 0) {
		count = delayed_refs->num_entries * 2;
		run_most = 1;
	}

	while (1) {
		if (!(run_all || run_most) &&
		    delayed_refs->num_heads_ready < 64)
			break;

		/*
		 * go find something we can process in the rbtree. We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		delayed_start = delayed_refs->run_delayed_start;
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		ret = run_clustered_refs(trans, root, &cluster);
		if (ret < 0) {
			spin_unlock(&delayed_refs->lock);
			btrfs_abort_transaction(trans, root, ret);
			return ret;
		}

		count -= min_t(unsigned long, ret, count);

		if (count == 0)
			break;

		if (delayed_start >= delayed_refs->run_delayed_start) {
			if (loops == 0) {
				/*
				 * btrfs_find_ref_cluster looped. let's do one
				 * more cycle. if we don't run any delayed ref
				 * during that cycle (because we can't because
				 * all of them are blocked), bail out.
				 */
				loops = 1;
			} else {
				/*
				 * no runnable refs left, stop trying
				 */
				BUG_ON(run_all);
				break;
			}
		}
		if (ret) {
			/* refs were run, let's reset staleness detection */
			loops = 0;
		}
	}

	if (run_all) {
		node = rb_first(&delayed_refs->root);
		if (!node)
			goto out;
		count = (unsigned long)-1;

		while (node) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;

				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);
		goto again;
	}
out:
	spin_unlock(&delayed_refs->lock);
	assert_qgroups_uptodate(trans);
	return 0;
}
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;

	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
					  num_bytes, extent_op);
	if (ret)
		kfree(extent_op);
	return ret;
}
static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_path *path,
                                      u64 objectid, u64 offset, u64 bytenr)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_data_ref *data_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct rb_node *node;
        int ret = 0;

        ret = -ENOENT;
        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (!head)
                goto out;

        if (!mutex_trylock(&head->mutex)) {
                atomic_inc(&head->node.refs);
                spin_unlock(&delayed_refs->lock);

                btrfs_release_path(path);

                /*
                 * Mutex was contended, block until it's released and let
                 * caller try again
                 */
                mutex_lock(&head->mutex);
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }

        node = rb_prev(&head->node.rb_node);
        if (!node)
                goto out_unlock;

        ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

        if (ref->bytenr != bytenr)
                goto out_unlock;

        ret = 1;
        if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
                goto out_unlock;

        data_ref = btrfs_delayed_node_to_data_ref(ref);

        node = rb_prev(node);
        if (node) {
                int seq = ref->seq;

                ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
                if (ref->bytenr == bytenr && ref->seq == seq)
                        goto out_unlock;
        }

        if (data_ref->root != root->root_key.objectid ||
            data_ref->objectid != objectid || data_ref->offset != offset)
                goto out_unlock;

        ret = 0;
out_unlock:
        mutex_unlock(&head->mutex);
out:
        spin_unlock(&delayed_refs->lock);
        return ret;
}

static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_path *path,
                                        u64 objectid, u64 offset, u64 bytenr)
{
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref;
        struct btrfs_extent_inline_ref *iref;
        struct btrfs_extent_item *ei;
        struct btrfs_key key;
        u32 item_size;
        int ret;

        key.objectid = bytenr;
        key.offset = (u64)-1;
        key.type = BTRFS_EXTENT_ITEM_KEY;

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        BUG_ON(ret == 0); /* Corruption */

        ret = -ENOENT;
        if (path->slots[0] == 0)
                goto out;

        path->slots[0]--;
        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
                goto out;

        ret = 1;
        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (item_size < sizeof(*ei)) {
                WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
                goto out;
        }
#endif
        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

        if (item_size != sizeof(*ei) +
            btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
                goto out;

        if (btrfs_extent_generation(leaf, ei) <=
            btrfs_root_last_snapshot(&root->root_item))
                goto out;

        iref = (struct btrfs_extent_inline_ref *)(ei + 1);
        if (btrfs_extent_inline_ref_type(leaf, iref) !=
            BTRFS_EXTENT_DATA_REF_KEY)
                goto out;

        ref = (struct btrfs_extent_data_ref *)(&iref->offset);
        if (btrfs_extent_refs(leaf, ei) !=
            btrfs_extent_data_ref_count(leaf, ref) ||
            btrfs_extent_data_ref_root(leaf, ref) !=
            root->root_key.objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                goto out;

        ret = 0;
out:
        return ret;
}

int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root,
                          u64 objectid, u64 offset, u64 bytenr)
{
        struct btrfs_path *path;
        int ret;
        int ret2;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        do {
                ret = check_committed_ref(trans, root, path, objectid,
                                          offset, bytenr);
                if (ret && ret != -ENOENT)
                        goto out;

                ret2 = check_delayed_ref(trans, root, path, objectid,
                                         offset, bytenr);
        } while (ret2 == -EAGAIN);

        if (ret2 && ret2 != -ENOENT) {
                ret = ret2;
                goto out;
        }

        if (ret != -ENOENT || ret2 != -ENOENT)
                ret = 0;
out:
        btrfs_free_path(path);
        if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
                WARN_ON(ret > 0);
        return ret;
}

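/*
 * Illustrative sketch only (not part of the original file): a typical
 * caller treats btrfs_cross_ref_exist() == 0 as "this extent is referenced
 * only by @root at (@objectid, @offset)" and anything else, including
 * errors, as "assume shared and fall back to COW". The helper name below
 * is hypothetical; it is kept under #if 0 so it does not affect the build.
 */
#if 0
static int may_overwrite_in_place(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root, u64 ino,
                                  u64 file_offset, u64 disk_bytenr)
{
        /* 0 == no committed or delayed cross reference was found */
        return btrfs_cross_ref_exist(trans, root, ino, file_offset,
                                     disk_bytenr) == 0;
}
#endif
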
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct extent_buffer *buf,
                           int full_backref, int inc, int for_cow)
{
        u64 bytenr;
        u64 num_bytes;
        u64 parent;
        u64 ref_root;
        u32 nritems;
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        int i;
        int level;
        int ret = 0;
        int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
                            u64, u64, u64, u64, u64, u64, int);

        ref_root = btrfs_header_owner(buf);
        nritems = btrfs_header_nritems(buf);
        level = btrfs_header_level(buf);

        if (!root->ref_cows && level == 0)
                return 0;

        if (inc)
                process_func = btrfs_inc_extent_ref;
        else
                process_func = btrfs_free_extent;

        if (full_backref)
                parent = buf->start;
        else
                parent = 0;

        for (i = 0; i < nritems; i++) {
                if (level == 0) {
                        btrfs_item_key_to_cpu(buf, &key, i);
                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
                                continue;
                        fi = btrfs_item_ptr(buf, i,
                                            struct btrfs_file_extent_item);
                        if (btrfs_file_extent_type(buf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE)
                                continue;
                        bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
                        if (bytenr == 0)
                                continue;

                        num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
                        key.offset -= btrfs_file_extent_offset(buf, fi);
                        ret = process_func(trans, root, bytenr, num_bytes,
                                           parent, ref_root, key.objectid,
                                           key.offset, for_cow);
                        if (ret)
                                goto fail;
                } else {
                        bytenr = btrfs_node_blockptr(buf, i);
                        num_bytes = btrfs_level_size(root, level - 1);
                        ret = process_func(trans, root, bytenr, num_bytes,
                                           parent, ref_root, level - 1, 0,
                                           for_cow);
                        if (ret)
                                goto fail;
                }
        }
        return 0;
fail:
        return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct extent_buffer *buf, int full_backref, int for_cow)
{
        return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct extent_buffer *buf, int full_backref, int for_cow)
{
        return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
}

static int write_one_cache_group(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_block_group_cache *cache)
{
        int ret;
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        unsigned long bi;
        struct extent_buffer *leaf;

        ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
        if (ret < 0)
                goto fail;
        BUG_ON(ret); /* Corruption */

        leaf = path->nodes[0];
        bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
        write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);
fail:
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                return ret;
        }
        return 0;
}

static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
                 struct btrfs_block_group_cache *cache)
{
        struct rb_node *node;

        spin_lock(&root->fs_info->block_group_cache_lock);
        node = rb_next(&cache->cache_node);
        btrfs_put_block_group(cache);
        if (node) {
                cache = rb_entry(node, struct btrfs_block_group_cache,
                                 cache_node);
                btrfs_get_block_group(cache);
        } else
                cache = NULL;
        spin_unlock(&root->fs_info->block_group_cache_lock);
        return cache;
}

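/*
 * Note (added commentary, not from the original source): next_block_group()
 * consumes the reference on @cache and returns the next group holding a
 * fresh reference, so callers can walk every block group the way the loops
 * further down in this file do:
 *
 *      cache = btrfs_lookup_first_block_group(fs_info, 0);
 *      while (cache) {
 *              ... inspect cache ...
 *              cache = next_block_group(root, cache);
 *      }
 *
 * Breaking out of such a loop early requires an explicit
 * btrfs_put_block_group(cache) to avoid leaking the reference.
 */
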
static int cache_save_setup(struct btrfs_block_group_cache *block_group,
                            struct btrfs_trans_handle *trans,
                            struct btrfs_path *path)
{
        struct btrfs_root *root = block_group->fs_info->tree_root;
        struct inode *inode = NULL;
        u64 alloc_hint = 0;
        int dcs = BTRFS_DC_ERROR;
        int num_pages = 0;
        int retries = 0;
        int ret = 0;

        /*
         * If this block group is smaller than 100 megs don't bother caching
         * the block group.
         */
        if (block_group->key.offset < (100 * 1024 * 1024)) {
                spin_lock(&block_group->lock);
                block_group->disk_cache_state = BTRFS_DC_WRITTEN;
                spin_unlock(&block_group->lock);
                return 0;
        }

again:
        inode = lookup_free_space_inode(root, block_group, path);
        if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
                ret = PTR_ERR(inode);
                btrfs_release_path(path);
                goto out;
        }

        if (IS_ERR(inode)) {
                BUG_ON(retries);
                retries++;

                if (block_group->ro)
                        goto out_free;

                ret = create_free_space_inode(root, trans, block_group, path);
                if (ret)
                        goto out_free;
                goto again;
        }

        /* We've already setup this transaction, go ahead and exit */
        if (block_group->cache_generation == trans->transid &&
            i_size_read(inode)) {
                dcs = BTRFS_DC_SETUP;
                goto out_put;
        }

        /*
         * We want to set the generation to 0, that way if anything goes wrong
         * from here on out we know not to trust this cache when we load up
         * next time.
         */
        BTRFS_I(inode)->generation = 0;
        ret = btrfs_update_inode(trans, root, inode);
        WARN_ON(ret);

        if (i_size_read(inode) > 0) {
                ret = btrfs_truncate_free_space_cache(root, trans, path,
                                                      inode);
                if (ret)
                        goto out_put;
        }

        spin_lock(&block_group->lock);
        if (block_group->cached != BTRFS_CACHE_FINISHED ||
            !btrfs_test_opt(root, SPACE_CACHE)) {
                /*
                 * don't bother trying to write stuff out _if_
                 * a) we're not cached, or
                 * b) we're mounted with the nospace_cache option.
                 */
                dcs = BTRFS_DC_WRITTEN;
                spin_unlock(&block_group->lock);
                goto out_put;
        }
        spin_unlock(&block_group->lock);

        /*
         * Try to preallocate enough space based on how big the block group
         * is. Keep in mind this has to include any pinned space which could
         * end up taking up quite a bit since it's not folded into the other
         * space cache.
         */
        num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
        if (!num_pages)
                num_pages = 1;

        num_pages *= 16;
        num_pages *= PAGE_CACHE_SIZE;

        ret = btrfs_check_data_free_space(inode, num_pages);
        if (ret)
                goto out_put;

        ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
                                              num_pages, num_pages,
                                              &alloc_hint);
        if (!ret)
                dcs = BTRFS_DC_SETUP;
        btrfs_free_reserved_data_space(inode, num_pages);

out_put:
        iput(inode);
out_free:
        btrfs_release_path(path);
out:
        spin_lock(&block_group->lock);
        if (!ret && dcs == BTRFS_DC_SETUP)
                block_group->cache_generation = trans->transid;
        block_group->disk_cache_state = dcs;
        spin_unlock(&block_group->lock);

        return ret;
}

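/*
 * Worked example of the sizing above (added commentary, hypothetical
 * values): for a 1 GiB block group with 4 KiB pages,
 *
 *      div64_u64(1 GiB, 256 MiB)  = 4
 *      4 * 16                     = 64 pages
 *      64 * PAGE_CACHE_SIZE       = 256 KiB preallocated for the cache
 *
 * i.e. the free space cache file is preallocated at roughly 16 pages per
 * 256 MiB of block group.
 */
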
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        struct btrfs_block_group_cache *cache;
        int err = 0;
        struct btrfs_path *path;
        u64 last = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                while (cache) {
                        if (cache->disk_cache_state == BTRFS_DC_CLEAR)
                                break;
                        cache = next_block_group(root, cache);
                }
                if (!cache) {
                        if (last == 0)
                                break;
                        last = 0;
                        continue;
                }
                err = cache_save_setup(cache, trans, path);
                last = cache->key.objectid + cache->key.offset;
                btrfs_put_block_group(cache);
        }

        while (1) {
                if (last == 0) {
                        err = btrfs_run_delayed_refs(trans, root,
                                                     (unsigned long)-1);
                        if (err) /* File system offline */
                                goto out;
                }

                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                while (cache) {
                        if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
                                btrfs_put_block_group(cache);
                                goto again;
                        }

                        if (cache->dirty)
                                break;
                        cache = next_block_group(root, cache);
                }
                if (!cache) {
                        if (last == 0)
                                break;
                        last = 0;
                        continue;
                }

                if (cache->disk_cache_state == BTRFS_DC_SETUP)
                        cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
                cache->dirty = 0;
                last = cache->key.objectid + cache->key.offset;

                err = write_one_cache_group(trans, root, path, cache);
                if (err) /* File system offline */
                        goto out;

                btrfs_put_block_group(cache);
        }

        while (1) {
                /*
                 * I don't think this is needed since we're just marking our
                 * preallocated extent as written, but just in case it can't
                 * hurt.
                 */
                if (last == 0) {
                        err = btrfs_run_delayed_refs(trans, root,
                                                     (unsigned long)-1);
                        if (err) /* File system offline */
                                goto out;
                }

                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                while (cache) {
                        /*
                         * Really this shouldn't happen, but it could if we
                         * couldn't write the entire preallocated extent and
                         * splitting the extent resulted in a new block.
                         */
                        if (cache->dirty) {
                                btrfs_put_block_group(cache);
                                goto again;
                        }
                        if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
                                break;
                        cache = next_block_group(root, cache);
                }
                if (!cache) {
                        if (last == 0)
                                break;
                        last = 0;
                        continue;
                }

                err = btrfs_write_out_cache(root, trans, cache, path);

                /*
                 * If we didn't have an error then the cache state is still
                 * NEED_WRITE, so we can set it to WRITTEN.
                 */
                if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
                        cache->disk_cache_state = BTRFS_DC_WRITTEN;
                last = cache->key.objectid + cache->key.offset;
                btrfs_put_block_group(cache);
        }
out:
        btrfs_free_path(path);
        return err;
}

int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
        struct btrfs_block_group_cache *block_group;
        int readonly = 0;

        block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
        if (!block_group || block_group->ro)
                readonly = 1;
        if (block_group)
                btrfs_put_block_group(block_group);
        return readonly;
}

static int update_space_info(struct btrfs_fs_info *info, u64 flags,
                             u64 total_bytes, u64 bytes_used,
                             struct btrfs_space_info **space_info)
{
        struct btrfs_space_info *found;
        int i;
        int factor;

        if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
                     BTRFS_BLOCK_GROUP_RAID10))
                factor = 2;
        else
                factor = 1;

        found = __find_space_info(info, flags);
        if (found) {
                spin_lock(&found->lock);
                found->total_bytes += total_bytes;
                found->disk_total += total_bytes * factor;
                found->bytes_used += bytes_used;
                found->disk_used += bytes_used * factor;
                found->full = 0;
                spin_unlock(&found->lock);
                *space_info = found;
                return 0;
        }
        found = kzalloc(sizeof(*found), GFP_NOFS);
        if (!found)
                return -ENOMEM;

        for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
                INIT_LIST_HEAD(&found->block_groups[i]);
        init_rwsem(&found->groups_sem);
        spin_lock_init(&found->lock);
        found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
        found->total_bytes = total_bytes;
        found->disk_total = total_bytes * factor;
        found->bytes_used = bytes_used;
        found->disk_used = bytes_used * factor;
        found->bytes_pinned = 0;
        found->bytes_reserved = 0;
        found->bytes_readonly = 0;
        found->bytes_may_use = 0;
        found->full = 0;
        found->force_alloc = CHUNK_ALLOC_NO_FORCE;
        found->chunk_alloc = 0;
        found->flush = 0;
        init_waitqueue_head(&found->wait);
        *space_info = found;
        list_add_rcu(&found->list, &info->space_info);
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                info->data_sinfo = found;
        return 0;
}

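/*
 * Added commentary (not from the original source): @factor above accounts
 * for profiles that keep two copies of every byte. For example, adding a
 * 1 GiB RAID1 block group with 100 MiB used bumps total_bytes/bytes_used
 * by 1 GiB/100 MiB but disk_total/disk_used by 2 GiB/200 MiB, since the
 * mirror consumes raw device space that the logical accounting does not
 * see.
 */
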
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
        u64 extra_flags = chunk_to_extended(flags) &
                          BTRFS_EXTENDED_PROFILE_MASK;

        if (flags & BTRFS_BLOCK_GROUP_DATA)
                fs_info->avail_data_alloc_bits |= extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_METADATA)
                fs_info->avail_metadata_alloc_bits |= extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                fs_info->avail_system_alloc_bits |= extra_flags;
}

/*
 * returns target flags in extended format or 0 if restripe for this
 * chunk_type is not in progress
 *
 * should be called with either volume_mutex or balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
        struct btrfs_balance_control *bctl = fs_info->balance_ctl;
        u64 target = 0;

        if (!bctl)
                return 0;

        if (flags & BTRFS_BLOCK_GROUP_DATA &&
            bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
                target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
        } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
                   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
                target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
        } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
                   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
                target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
        }

        return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Returns reduced profile in chunk format. If profile changing is in
 * progress (either running or paused) picks the target profile (if it's
 * already available), otherwise falls back to plain reducing.
 */
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
        /*
         * we add in the count of missing devices because we want
         * to make sure that any RAID levels on a degraded FS
         * continue to be honored.
         */
        u64 num_devices = root->fs_info->fs_devices->rw_devices +
                          root->fs_info->fs_devices->missing_devices;
        u64 target;

        /*
         * see if restripe for this chunk_type is in progress, if so
         * try to reduce to the target profile
         */
        spin_lock(&root->fs_info->balance_lock);
        target = get_restripe_target(root->fs_info, flags);
        if (target) {
                /* pick target profile only if it's already available */
                if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
                        spin_unlock(&root->fs_info->balance_lock);
                        return extended_to_chunk(target);
                }
        }
        spin_unlock(&root->fs_info->balance_lock);

        if (num_devices == 1)
                flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
        if (num_devices < 4)
                flags &= ~BTRFS_BLOCK_GROUP_RAID10;

        if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
            (flags & (BTRFS_BLOCK_GROUP_RAID1 |
                      BTRFS_BLOCK_GROUP_RAID10))) {
                flags &= ~BTRFS_BLOCK_GROUP_DUP;
        }

        if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
            (flags & BTRFS_BLOCK_GROUP_RAID10)) {
                flags &= ~BTRFS_BLOCK_GROUP_RAID1;
        }

        if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
            ((flags & BTRFS_BLOCK_GROUP_RAID1) |
             (flags & BTRFS_BLOCK_GROUP_RAID10) |
             (flags & BTRFS_BLOCK_GROUP_DUP))) {
                flags &= ~BTRFS_BLOCK_GROUP_RAID0;
        }
        return extended_to_chunk(flags);
}

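/*
 * Worked example (added commentary, hypothetical values): with
 * @flags = DATA | RAID10 | RAID1 | RAID0 on a 2-device filesystem,
 * num_devices < 4 strips RAID10, then the RAID1-beats-RAID0 rule strips
 * RAID0, reducing the result to DATA | RAID1. On a single device the
 * first test would strip RAID1 and RAID0 as well, leaving plain DATA,
 * i.e. the "single" profile after extended_to_chunk().
 */
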
static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
{
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                flags |= root->fs_info->avail_data_alloc_bits;
        else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                flags |= root->fs_info->avail_system_alloc_bits;
        else if (flags & BTRFS_BLOCK_GROUP_METADATA)
                flags |= root->fs_info->avail_metadata_alloc_bits;

        return btrfs_reduce_alloc_profile(root, flags);
}

u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
        u64 flags;

        if (data)
                flags = BTRFS_BLOCK_GROUP_DATA;
        else if (root == root->fs_info->chunk_root)
                flags = BTRFS_BLOCK_GROUP_SYSTEM;
        else
                flags = BTRFS_BLOCK_GROUP_METADATA;

        return get_alloc_profile(root, flags);
}

/*
 * This will check the space that the inode allocates from to make sure we
 * have enough space for bytes.
 */
int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
{
        struct btrfs_space_info *data_sinfo;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 used;
        int ret = 0, committed = 0, alloc_chunk = 1;

        /* make sure bytes are sectorsize aligned */
        bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

        if (root == root->fs_info->tree_root ||
            BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
                alloc_chunk = 0;
                committed = 1;
        }

        data_sinfo = fs_info->data_sinfo;
        if (!data_sinfo)
                goto alloc;

again:
        /* make sure we have enough space to handle the data first */
        spin_lock(&data_sinfo->lock);
        used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
               data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
               data_sinfo->bytes_may_use;

        if (used + bytes > data_sinfo->total_bytes) {
                struct btrfs_trans_handle *trans;

                /*
                 * if we don't have enough free bytes in this space then we
                 * need to alloc a new chunk.
                 */
                if (!data_sinfo->full && alloc_chunk) {
                        u64 alloc_target;

                        data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
                        spin_unlock(&data_sinfo->lock);
alloc:
                        alloc_target = btrfs_get_alloc_profile(root, 1);
                        trans = btrfs_join_transaction(root);
                        if (IS_ERR(trans))
                                return PTR_ERR(trans);

                        ret = do_chunk_alloc(trans, root->fs_info->extent_root,
                                             bytes + 2 * 1024 * 1024,
                                             alloc_target,
                                             CHUNK_ALLOC_NO_FORCE);
                        btrfs_end_transaction(trans, root);
                        if (ret < 0) {
                                if (ret != -ENOSPC)
                                        return ret;
                                else
                                        goto commit_trans;
                        }

                        if (!data_sinfo)
                                data_sinfo = fs_info->data_sinfo;

                        goto again;
                }

                /*
                 * If we have less pinned bytes than we want to allocate then
                 * don't bother committing the transaction, it won't help us.
                 */
                if (data_sinfo->bytes_pinned < bytes)
                        committed = 1;
                spin_unlock(&data_sinfo->lock);

                /* commit the current transaction and try again */
commit_trans:
                if (!committed &&
                    !atomic_read(&root->fs_info->open_ioctl_trans)) {
                        committed = 1;
                        trans = btrfs_join_transaction(root);
                        if (IS_ERR(trans))
                                return PTR_ERR(trans);
                        ret = btrfs_commit_transaction(trans, root);
                        if (ret)
                                return ret;
                        goto again;
                }

                return -ENOSPC;
        }
        data_sinfo->bytes_may_use += bytes;
        trace_btrfs_space_reservation(root->fs_info, "space_info",
                                      data_sinfo->flags, bytes, 1);
        spin_unlock(&data_sinfo->lock);

        return 0;
}

/*
 * Called if we need to clear a data reservation for this inode.
 */
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_space_info *data_sinfo;

        /* make sure bytes are sectorsize aligned */
        bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

        data_sinfo = root->fs_info->data_sinfo;
        spin_lock(&data_sinfo->lock);
        data_sinfo->bytes_may_use -= bytes;
        trace_btrfs_space_reservation(root->fs_info, "space_info",
                                      data_sinfo->flags, bytes, 0);
        spin_unlock(&data_sinfo->lock);
}

static void force_metadata_allocation(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
                        found->force_alloc = CHUNK_ALLOC_FORCE;
        }
        rcu_read_unlock();
}

static int should_alloc_chunk(struct btrfs_root *root,
                              struct btrfs_space_info *sinfo, u64 alloc_bytes,
                              int force)
{
        struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
        u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
        u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
        u64 thresh;

        if (force == CHUNK_ALLOC_FORCE)
                return 1;

        /*
         * We need to take into account the global rsv because for all intents
         * and purposes it's used space. Don't worry about locking the
         * global_rsv, it doesn't change except when the transaction commits.
         */
        if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
                num_allocated += global_rsv->size;

        /*
         * in limited mode, we want to have some free space up to
         * about 1% of the FS size.
         */
        if (force == CHUNK_ALLOC_LIMITED) {
                thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
                thresh = max_t(u64, 64 * 1024 * 1024,
                               div_factor_fine(thresh, 1));

                if (num_bytes - num_allocated < thresh)
                        return 1;
        }

        if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
                return 0;
        return 1;
}

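/*
 * Worked example (added commentary, hypothetical values): on a 1 TiB
 * filesystem, CHUNK_ALLOC_LIMITED uses
 *
 *      thresh = max(64 MiB, div_factor_fine(1 TiB, 1)) ~= 10 GiB
 *
 * so a new chunk is forced whenever less than about 1% of the space info
 * remains unallocated. Outside limited mode the final test only allocates
 * once the existing chunks are more than 80% committed
 * (div_factor(num_bytes, 8) == num_bytes * 8 / 10).
 */
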
static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
{
        u64 num_dev;

        if (type & BTRFS_BLOCK_GROUP_RAID10 ||
            type & BTRFS_BLOCK_GROUP_RAID0)
                num_dev = root->fs_info->fs_devices->rw_devices;
        else if (type & BTRFS_BLOCK_GROUP_RAID1)
                num_dev = 2;
        else
                num_dev = 1;    /* DUP or single */

        /* metadata for updating devices and chunk tree */
        return btrfs_calc_trans_metadata_size(root, num_dev + 1);
}

static void check_system_chunk(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root, u64 type)
{
        struct btrfs_space_info *info;
        u64 left;
        u64 thresh;

        info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
        spin_lock(&info->lock);
        left = info->total_bytes - info->bytes_used - info->bytes_pinned -
               info->bytes_reserved - info->bytes_readonly;
        spin_unlock(&info->lock);

        thresh = get_system_chunk_thresh(root, type);
        if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
                printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
                       left, thresh, type);
                dump_space_info(info, 0, 0);
        }

        if (left < thresh) {
                u64 flags;

                flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
                btrfs_alloc_chunk(trans, root, flags);
        }
}

static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force)
{
        struct btrfs_space_info *space_info;
        struct btrfs_fs_info *fs_info = extent_root->fs_info;
        int wait_for_alloc = 0;
        int ret = 0;

        space_info = __find_space_info(extent_root->fs_info, flags);
        if (!space_info) {
                ret = update_space_info(extent_root->fs_info, flags,
                                        0, 0, &space_info);
                BUG_ON(ret); /* -ENOMEM */
        }
        BUG_ON(!space_info); /* Logic error */

again:
        spin_lock(&space_info->lock);
        if (force < space_info->force_alloc)
                force = space_info->force_alloc;
        if (space_info->full) {
                spin_unlock(&space_info->lock);
                return 0;
        }

        if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
                spin_unlock(&space_info->lock);
                return 0;
        } else if (space_info->chunk_alloc) {
                wait_for_alloc = 1;
        } else {
                space_info->chunk_alloc = 1;
        }

        spin_unlock(&space_info->lock);

        mutex_lock(&fs_info->chunk_mutex);

        /*
         * The chunk_mutex is held throughout the entirety of a chunk
         * allocation, so once we've acquired the chunk_mutex we know that the
         * other guy is done and we need to recheck and see if we should
         * allocate.
         */
        if (wait_for_alloc) {
                mutex_unlock(&fs_info->chunk_mutex);
                wait_for_alloc = 0;
                goto again;
        }

        /*
         * If we have mixed data/metadata chunks we want to make sure we keep
         * allocating mixed chunks instead of individual chunks.
         */
        if (btrfs_mixed_space_info(space_info))
                flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

        /*
         * if we're doing a data chunk, go ahead and make sure that
         * we keep a reasonable number of metadata chunks allocated in the
         * FS as well.
         */
        if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
                fs_info->data_chunk_allocations++;
                if (!(fs_info->data_chunk_allocations %
                      fs_info->metadata_ratio))
                        force_metadata_allocation(fs_info);
        }

        /*
         * Check if we have enough space in SYSTEM chunk because we may need
         * to update devices.
         */
        check_system_chunk(trans, extent_root, flags);

        ret = btrfs_alloc_chunk(trans, extent_root, flags);
        if (ret < 0 && ret != -ENOSPC)
                goto out;

        spin_lock(&space_info->lock);
        if (ret)
                space_info->full = 1;
        else
                ret = 1;

        space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
        space_info->chunk_alloc = 0;
        spin_unlock(&space_info->lock);
out:
        mutex_unlock(&fs_info->chunk_mutex);
        return ret;
}

static int can_overcommit(struct btrfs_root *root,
                          struct btrfs_space_info *space_info, u64 bytes,
                          int flush)
{
        u64 profile = btrfs_get_alloc_profile(root, 0);
        u64 avail;
        u64 used;

        used = space_info->bytes_used + space_info->bytes_reserved +
               space_info->bytes_pinned + space_info->bytes_readonly +
               space_info->bytes_may_use;

        spin_lock(&root->fs_info->free_chunk_lock);
        avail = root->fs_info->free_chunk_space;
        spin_unlock(&root->fs_info->free_chunk_lock);

        /*
         * If we have dup, raid1 or raid10 then only half of the free
         * space is actually usable.
         */
        if (profile & (BTRFS_BLOCK_GROUP_DUP |
                       BTRFS_BLOCK_GROUP_RAID1 |
                       BTRFS_BLOCK_GROUP_RAID10))
                avail >>= 1;

        /*
         * If we aren't flushing, let us overcommit up to 1/2 of the space.
         * If we can flush, don't let us overcommit too much, say 1/8 of
         * the space, since flushing will reclaim real room anyway.
         */
        if (flush)
                avail >>= 3;
        else
                avail >>= 1;

        if (used + bytes < space_info->total_bytes + avail)
                return 1;
        return 0;
}

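/*
 * Worked example (added commentary, hypothetical values): with 8 GiB of
 * unallocated raw space (free_chunk_space) and a RAID1 metadata profile,
 * the usable estimate is first halved to 4 GiB for the second copy, then
 * halved again to 2 GiB when flushing is not allowed (or divided by 8 to
 * 512 MiB when it is). A reservation is allowed to overcommit while
 * used + bytes < total_bytes + that slack.
 */
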
/*
 * shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
                            bool wait_ordered)
{
        struct btrfs_block_rsv *block_rsv;
        struct btrfs_space_info *space_info;
        struct btrfs_trans_handle *trans;
        u64 delalloc_bytes;
        u64 max_reclaim;
        long time_left;
        unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
        int loops = 0;

        trans = (struct btrfs_trans_handle *)current->journal_info;
        block_rsv = &root->fs_info->delalloc_block_rsv;
        space_info = block_rsv->space_info;

        smp_mb();
        delalloc_bytes = root->fs_info->delalloc_bytes;
        if (delalloc_bytes == 0) {
                if (trans)
                        return;
                btrfs_wait_ordered_extents(root, 0, 0);
                return;
        }

        while (delalloc_bytes && loops < 3) {
                max_reclaim = min(delalloc_bytes, to_reclaim);
                nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
                writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
                                               WB_REASON_FS_FREE_SPACE);

                /*
                 * We need to wait for the async pages to actually start
                 * before we do anything.
                 */
                wait_event(root->fs_info->async_submit_wait,
                           !atomic_read(&root->fs_info->async_delalloc_pages));

                spin_lock(&space_info->lock);
                if (can_overcommit(root, space_info, orig, !trans)) {
                        spin_unlock(&space_info->lock);
                        break;
                }
                spin_unlock(&space_info->lock);

                loops++;
                if (wait_ordered && !trans) {
                        btrfs_wait_ordered_extents(root, 0, 0);
                } else {
                        time_left = schedule_timeout_killable(1);
                        if (time_left)
                                break;
                }
                smp_mb();
                delalloc_bytes = root->fs_info->delalloc_bytes;
        }
}

/**
 * may_commit_transaction - possibly commit the transaction if it's ok to
 * @root - the root we're allocating for
 * @space_info - the space_info we're allocating for
 * @bytes - the number of bytes we want to reserve
 * @force - force the commit
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does. Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_root *root,
                                  struct btrfs_space_info *space_info,
                                  u64 bytes, int force)
{
        struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
        struct btrfs_trans_handle *trans;

        trans = (struct btrfs_trans_handle *)current->journal_info;
        if (trans)
                return -EAGAIN;

        if (force)
                goto commit;

        /* See if there is enough pinned space to make this reservation */
        spin_lock(&space_info->lock);
        if (space_info->bytes_pinned >= bytes) {
                spin_unlock(&space_info->lock);
                goto commit;
        }
        spin_unlock(&space_info->lock);

        /*
         * See if there is some space in the delayed insertion reservation for
         * this reservation.
         */
        if (space_info != delayed_rsv->space_info)
                return -ENOSPC;

        spin_lock(&space_info->lock);
        spin_lock(&delayed_rsv->lock);
        if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
                spin_unlock(&delayed_rsv->lock);
                spin_unlock(&space_info->lock);
                return -ENOSPC;
        }
        spin_unlock(&delayed_rsv->lock);
        spin_unlock(&space_info->lock);

commit:
        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                return -ENOSPC;

        return btrfs_commit_transaction(trans, root);
}

enum flush_state {
        FLUSH_DELALLOC          = 1,
        FLUSH_DELALLOC_WAIT     = 2,
        FLUSH_DELAYED_ITEMS_NR  = 3,
        FLUSH_DELAYED_ITEMS     = 4,
        COMMIT_TRANS            = 5,
};

static int flush_space(struct btrfs_root *root,
                       struct btrfs_space_info *space_info, u64 num_bytes,
                       u64 orig_bytes, int state)
{
        struct btrfs_trans_handle *trans;
        int nr;
        int ret = 0;

        switch (state) {
        case FLUSH_DELALLOC:
        case FLUSH_DELALLOC_WAIT:
                shrink_delalloc(root, num_bytes, orig_bytes,
                                state == FLUSH_DELALLOC_WAIT);
                break;
        case FLUSH_DELAYED_ITEMS_NR:
        case FLUSH_DELAYED_ITEMS:
                if (state == FLUSH_DELAYED_ITEMS_NR) {
                        u64 bytes = btrfs_calc_trans_metadata_size(root, 1);

                        nr = (int)div64_u64(num_bytes, bytes);
                        if (!nr)
                                nr = 1;
                        nr *= 2;
                } else {
                        nr = -1;
                }
                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        break;
                }
                ret = btrfs_run_delayed_items_nr(trans, root, nr);
                btrfs_end_transaction(trans, root);
                break;
        case COMMIT_TRANS:
                ret = may_commit_transaction(root, space_info, orig_bytes, 0);
                break;
        default:
                ret = -ENOSPC;
                break;
        }

        return ret;
}

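/*
 * Added commentary (not from the original source): flush_space() is driven
 * by reserve_metadata_bytes() below, which walks the flush_state enum in
 * order, so reclaim escalates from cheap to expensive:
 *
 *      FLUSH_DELALLOC -> FLUSH_DELALLOC_WAIT -> FLUSH_DELAYED_ITEMS_NR
 *          -> FLUSH_DELAYED_ITEMS -> COMMIT_TRANS
 *
 * Each failed reservation attempt bumps flush_state and retries; once
 * COMMIT_TRANS has also failed, -ENOSPC is returned to the caller.
 */
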
/**
 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int reserve_metadata_bytes(struct btrfs_root *root,
                                  struct btrfs_block_rsv *block_rsv,
                                  u64 orig_bytes, int flush)
{
        struct btrfs_space_info *space_info = block_rsv->space_info;
        u64 used;
        u64 num_bytes = orig_bytes;
        int flush_state = FLUSH_DELALLOC;
        int ret = 0;
        bool flushing = false;
        bool committed = false;

again:
        ret = 0;
        spin_lock(&space_info->lock);
        /*
         * We only want to wait if somebody other than us is flushing and we
         * are actually allowed to flush.
         */
        while (flush && !flushing && space_info->flush) {
                spin_unlock(&space_info->lock);
                /*
                 * If we have a trans handle we can't wait because the flusher
                 * may have to commit the transaction, which would mean we
                 * would deadlock since we are waiting for the flusher to
                 * finish, but hold the current transaction open.
                 */
                if (current->journal_info)
                        return -EAGAIN;
                ret = wait_event_killable(space_info->wait, !space_info->flush);
                /* Must have been killed, return */
                if (ret)
                        return -EINTR;

                spin_lock(&space_info->lock);
        }

        ret = -ENOSPC;
        used = space_info->bytes_used + space_info->bytes_reserved +
               space_info->bytes_pinned + space_info->bytes_readonly +
               space_info->bytes_may_use;

        /*
         * The idea here is that if we've not already over-reserved the
         * block group then we can go ahead and save our reservation first
         * and then start flushing if we need to. Otherwise if we've already
         * overcommitted lets start flushing stuff first and then come back
         * and try to make our reservation.
         */
        if (used <= space_info->total_bytes) {
                if (used + orig_bytes <= space_info->total_bytes) {
                        space_info->bytes_may_use += orig_bytes;
                        trace_btrfs_space_reservation(root->fs_info,
                                "space_info", space_info->flags, orig_bytes, 1);
                        ret = 0;
                } else {
                        /*
                         * Ok set num_bytes to orig_bytes since we aren't
                         * overcommitted, this way we only try and reclaim
                         * what we need.
                         */
                        num_bytes = orig_bytes;
                }
        } else {
                /*
                 * Ok we're over committed, set num_bytes to the overcommitted
                 * amount plus the amount of bytes that we need for this
                 * reservation.
                 */
                num_bytes = used - space_info->total_bytes +
                            (orig_bytes * 2);
        }

        if (ret) {
                u64 avail;

                /*
                 * If we have a lot of space that's pinned, don't bother doing
                 * the overcommit dance yet and just commit the transaction.
                 */
                avail = (space_info->total_bytes - space_info->bytes_used) * 8;
                do_div(avail, 10);
                if (space_info->bytes_pinned >= avail && flush && !committed) {
                        space_info->flush = 1;
                        flushing = true;
                        spin_unlock(&space_info->lock);
                        ret = may_commit_transaction(root, space_info,
                                                     orig_bytes, 1);
                        if (ret)
                                goto out;
                        committed = true;
                        goto again;
                }

                if (can_overcommit(root, space_info, orig_bytes, flush)) {
                        space_info->bytes_may_use += orig_bytes;
                        trace_btrfs_space_reservation(root->fs_info,
                                "space_info", space_info->flags, orig_bytes, 1);
                        ret = 0;
                }
        }

        /*
         * Couldn't make our reservation, save our place so while we're trying
         * to reclaim space we can actually use it instead of somebody else
         * stealing it from us.
         */
        if (ret && flush) {
                flushing = true;
                space_info->flush = 1;
        }

        spin_unlock(&space_info->lock);

        if (!ret || !flush)
                goto out;

        ret = flush_space(root, space_info, num_bytes, orig_bytes,
                          flush_state);
        flush_state++;
        if (!ret)
                goto again;
        else if (flush_state <= COMMIT_TRANS)
                goto again;

out:
        if (flushing) {
                spin_lock(&space_info->lock);
                space_info->flush = 0;
                wake_up_all(&space_info->wait);
                spin_unlock(&space_info->lock);
        }
        return ret;
}

static struct btrfs_block_rsv *get_block_rsv(
                                        const struct btrfs_trans_handle *trans,
                                        const struct btrfs_root *root)
{
        struct btrfs_block_rsv *block_rsv = NULL;

        if (root->ref_cows)
                block_rsv = trans->block_rsv;

        if (root == root->fs_info->csum_root && trans->adding_csums)
                block_rsv = trans->block_rsv;

        if (!block_rsv)
                block_rsv = root->block_rsv;

        if (!block_rsv)
                block_rsv = &root->fs_info->empty_block_rsv;

        return block_rsv;
}

static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes)
{
        int ret = -ENOSPC;

        spin_lock(&block_rsv->lock);
        if (block_rsv->reserved >= num_bytes) {
                block_rsv->reserved -= num_bytes;
                if (block_rsv->reserved < block_rsv->size)
                        block_rsv->full = 0;
                ret = 0;
        }
        spin_unlock(&block_rsv->lock);
        return ret;
}

static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
                                u64 num_bytes, int update_size)
{
        spin_lock(&block_rsv->lock);
        block_rsv->reserved += num_bytes;
        if (update_size)
                block_rsv->size += num_bytes;
        else if (block_rsv->reserved >= block_rsv->size)
                block_rsv->full = 1;
        spin_unlock(&block_rsv->lock);
}

static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
                                    struct btrfs_block_rsv *block_rsv,
                                    struct btrfs_block_rsv *dest, u64 num_bytes)
{
        struct btrfs_space_info *space_info = block_rsv->space_info;

        spin_lock(&block_rsv->lock);
        if (num_bytes == (u64)-1)
                num_bytes = block_rsv->size;
        block_rsv->size -= num_bytes;
        if (block_rsv->reserved >= block_rsv->size) {
                num_bytes = block_rsv->reserved - block_rsv->size;
                block_rsv->reserved = block_rsv->size;
                block_rsv->full = 1;
        } else {
                num_bytes = 0;
        }
        spin_unlock(&block_rsv->lock);

        if (num_bytes > 0) {
                if (dest) {
                        spin_lock(&dest->lock);
                        if (!dest->full) {
                                u64 bytes_to_add;

                                bytes_to_add = dest->size - dest->reserved;
                                bytes_to_add = min(num_bytes, bytes_to_add);
                                dest->reserved += bytes_to_add;
                                if (dest->reserved >= dest->size)
                                        dest->full = 1;
                                num_bytes -= bytes_to_add;
                        }
                        spin_unlock(&dest->lock);
                }
                if (num_bytes) {
                        spin_lock(&space_info->lock);
                        space_info->bytes_may_use -= num_bytes;
                        trace_btrfs_space_reservation(fs_info, "space_info",
                                        space_info->flags, num_bytes, 0);
                        space_info->reservation_progress++;
                        spin_unlock(&space_info->lock);
                }
        }
}

static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
                                   struct btrfs_block_rsv *dst, u64 num_bytes)
{
        int ret;

        ret = block_rsv_use_bytes(src, num_bytes);
        if (ret)
                return ret;

        block_rsv_add_bytes(dst, num_bytes, 1);
        return 0;
}

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
{
        memset(rsv, 0, sizeof(*rsv));
        spin_lock_init(&rsv->lock);
        rsv->type = type;
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
                                              unsigned short type)
{
        struct btrfs_block_rsv *block_rsv;
        struct btrfs_fs_info *fs_info = root->fs_info;

        block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
        if (!block_rsv)
                return NULL;

        btrfs_init_block_rsv(block_rsv, type);
        block_rsv->space_info = __find_space_info(fs_info,
                                                  BTRFS_BLOCK_GROUP_METADATA);
        return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_root *root,
                          struct btrfs_block_rsv *rsv)
{
        if (!rsv)
                return;
        btrfs_block_rsv_release(root, rsv, (u64)-1);
        kfree(rsv);
}

static inline int __block_rsv_add(struct btrfs_root *root,
                                  struct btrfs_block_rsv *block_rsv,
                                  u64 num_bytes, int flush)
{
        int ret;

        if (num_bytes == 0)
                return 0;

        ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
        if (!ret) {
                block_rsv_add_bytes(block_rsv, num_bytes, 1);
                return 0;
        }

        return ret;
}

int btrfs_block_rsv_add(struct btrfs_root *root,
                        struct btrfs_block_rsv *block_rsv,
                        u64 num_bytes)
{
        return __block_rsv_add(root, block_rsv, num_bytes, 1);
}

int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
                                struct btrfs_block_rsv *block_rsv,
                                u64 num_bytes)
{
        return __block_rsv_add(root, block_rsv, num_bytes, 0);
}

int btrfs_block_rsv_check(struct btrfs_root *root,
                          struct btrfs_block_rsv *block_rsv, int min_factor)
{
        u64 num_bytes = 0;
        int ret = -ENOSPC;

        if (!block_rsv)
                return 0;

        spin_lock(&block_rsv->lock);
        num_bytes = div_factor(block_rsv->size, min_factor);
        if (block_rsv->reserved >= num_bytes)
                ret = 0;
        spin_unlock(&block_rsv->lock);

        return ret;
}

static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
                                           struct btrfs_block_rsv *block_rsv,
                                           u64 min_reserved, int flush)
{
        u64 num_bytes = 0;
        int ret = -ENOSPC;

        if (!block_rsv)
                return 0;

        spin_lock(&block_rsv->lock);
        num_bytes = min_reserved;
        if (block_rsv->reserved >= num_bytes)
                ret = 0;
        else
                num_bytes -= block_rsv->reserved;
        spin_unlock(&block_rsv->lock);

        if (!ret)
                return 0;

        ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
        if (!ret) {
                block_rsv_add_bytes(block_rsv, num_bytes, 0);
                return 0;
        }

        return ret;
}

int btrfs_block_rsv_refill(struct btrfs_root *root,
                           struct btrfs_block_rsv *block_rsv,
                           u64 min_reserved)
{
        return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
}

int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
                                   struct btrfs_block_rsv *block_rsv,
                                   u64 min_reserved)
{
        return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
}

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
                            struct btrfs_block_rsv *dst_rsv,
                            u64 num_bytes)
{
        return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_block_rsv_release(struct btrfs_root *root,
                             struct btrfs_block_rsv *block_rsv,
                             u64 num_bytes)
{
        struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;

        if (global_rsv->full || global_rsv == block_rsv ||
            block_rsv->space_info != global_rsv->space_info)
                global_rsv = NULL;
        block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
                                num_bytes);
}

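/*
 * Illustrative sketch only (not from the original file): the typical
 * lifecycle of a private reservation built on the helpers above. The
 * BTRFS_BLOCK_RSV_TEMP type and the single-item byte count are assumptions
 * for the example; it is kept under #if 0 so it does not affect the build.
 */
#if 0
static int example_reserve_and_release(struct btrfs_root *root)
{
        struct btrfs_block_rsv *rsv;
        u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
        int ret;

        rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
        if (!rsv)
                return -ENOMEM;

        /* may flush delalloc or commit to satisfy the reservation */
        ret = btrfs_block_rsv_add(root, rsv, num_bytes);
        if (!ret) {
                /* ... consume the reservation doing metadata work ... */

                /* excess spills back into the global rsv if it has room */
                btrfs_block_rsv_release(root, rsv, (u64)-1);
        }
        btrfs_free_block_rsv(root, rsv);
        return ret;
}
#endif
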
/*
 * helper to calculate size of global block reservation.
 * the desired value is sum of space used by extent tree,
 * checksum tree and root tree
 */
static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
{
        struct btrfs_space_info *sinfo;
        u64 num_bytes;
        u64 meta_used;
        u64 data_used;
        int csum_size = btrfs_super_csum_size(fs_info->super_copy);

        sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
        spin_lock(&sinfo->lock);
        data_used = sinfo->bytes_used;
        spin_unlock(&sinfo->lock);

        sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
        spin_lock(&sinfo->lock);
        if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
                data_used = 0;
        meta_used = sinfo->bytes_used;
        spin_unlock(&sinfo->lock);

        num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
                    csum_size * 2;
        num_bytes += div64_u64(data_used + meta_used, 50);

        if (num_bytes * 3 > meta_used)
                num_bytes = div64_u64(meta_used, 3);

        return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
}

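/*
 * Worked example (added commentary, hypothetical values): with 100 GiB of
 * data used, 1 GiB of metadata used, 4 KiB blocks and a 4-byte csum,
 *
 *      csum part: (100 GiB >> 12) * 4 * 2   = 200 MiB
 *      2% part:   (100 GiB + 1 GiB) / 50   ~= 2 GiB
 *
 * the sum (~2.2 GiB) is more than a third of meta_used, so the result is
 * clamped to 1 GiB / 3 (~341 MiB) and then rounded up to the
 * leafsize << 10 alignment.
 */
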
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
        struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
        struct btrfs_space_info *sinfo = block_rsv->space_info;
        u64 num_bytes;

        num_bytes = calc_global_metadata_size(fs_info);

        spin_lock(&sinfo->lock);
        spin_lock(&block_rsv->lock);

        block_rsv->size = num_bytes;

        num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
                    sinfo->bytes_reserved + sinfo->bytes_readonly +
                    sinfo->bytes_may_use;

        if (sinfo->total_bytes > num_bytes) {
                num_bytes = sinfo->total_bytes - num_bytes;
                block_rsv->reserved += num_bytes;
                sinfo->bytes_may_use += num_bytes;
                trace_btrfs_space_reservation(fs_info, "space_info",
                                              sinfo->flags, num_bytes, 1);
        }

        if (block_rsv->reserved >= block_rsv->size) {
                num_bytes = block_rsv->reserved - block_rsv->size;
                sinfo->bytes_may_use -= num_bytes;
                trace_btrfs_space_reservation(fs_info, "space_info",
                                              sinfo->flags, num_bytes, 0);
                sinfo->reservation_progress++;
                block_rsv->reserved = block_rsv->size;
                block_rsv->full = 1;
        }

        spin_unlock(&block_rsv->lock);
        spin_unlock(&sinfo->lock);
}

static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
        struct btrfs_space_info *space_info;

        space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
        fs_info->chunk_block_rsv.space_info = space_info;

        space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
        fs_info->global_block_rsv.space_info = space_info;
        fs_info->delalloc_block_rsv.space_info = space_info;
        fs_info->trans_block_rsv.space_info = space_info;
        fs_info->empty_block_rsv.space_info = space_info;
        fs_info->delayed_block_rsv.space_info = space_info;

        fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
        fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
        fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
        fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
        fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

        update_global_block_rsv(fs_info);
}

static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
        block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
                                (u64)-1);
        WARN_ON(fs_info->delalloc_block_rsv.size > 0);
        WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
        WARN_ON(fs_info->trans_block_rsv.size > 0);
        WARN_ON(fs_info->trans_block_rsv.reserved > 0);
        WARN_ON(fs_info->chunk_block_rsv.size > 0);
        WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
        WARN_ON(fs_info->delayed_block_rsv.size > 0);
        WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
}

void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        if (!trans->block_rsv)
                return;

        if (!trans->bytes_reserved)
                return;

        trace_btrfs_space_reservation(root->fs_info, "transaction",
                                      trans->transid, trans->bytes_reserved, 0);
        btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
        trans->bytes_reserved = 0;
}

/* Can only return 0 or -ENOSPC */
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * We need to hold space in order to delete our orphan item once we've
	 * added it, so this takes the reservation so we can release it later
	 * when we are truly done with the orphan item.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 1);
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}
void btrfs_orphan_release_metadata(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 0);
	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}

int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;

	/*
	 * two for root back/forward refs, two for directory entries,
	 * one for root of the snapshot and one for parent inode.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 6);

	dst_rsv->space_info = src_rsv->space_info;
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}
/**
 * drop_outstanding_extent - drop an outstanding extent
 * @inode: the inode we're dropping the extent for
 *
 * This is called when we are freeing up an outstanding extent, either called
 * after an error or after an extent is written.  This will return the number
 * of reserved extents that need to be freed.  This must be called with
 * BTRFS_I(inode)->lock held.
 */
static unsigned drop_outstanding_extent(struct inode *inode)
{
	unsigned drop_inode_space = 0;
	unsigned dropped_extents = 0;

	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
	BTRFS_I(inode)->outstanding_extents--;

	if (BTRFS_I(inode)->outstanding_extents == 0 &&
	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			       &BTRFS_I(inode)->runtime_flags))
		drop_inode_space = 1;

	/*
	 * If we have as many or more outstanding extents than we have
	 * reserved then we need to leave the reserved extents count alone.
	 */
	if (BTRFS_I(inode)->outstanding_extents >=
	    BTRFS_I(inode)->reserved_extents)
		return drop_inode_space;

	dropped_extents = BTRFS_I(inode)->reserved_extents -
		BTRFS_I(inode)->outstanding_extents;
	BTRFS_I(inode)->reserved_extents -= dropped_extents;
	return dropped_extents + drop_inode_space;
}
/**
 * calc_csum_metadata_size - return the amount of metadata space that must be
 *	reserved/freed for the given bytes.
 * @inode: the inode we're manipulating
 * @num_bytes: the number of bytes in question
 * @reserve: 1 if we are reserving space, 0 if we are freeing space
 *
 * This adjusts the number of csum_bytes in the inode and then returns the
 * correct amount of metadata that must either be reserved or freed.  We
 * calculate how many checksums we can fit into one leaf and then divide the
 * number of bytes that will need to be checksummed by this value to figure
 * out how many checksums will be required.  If we are adding bytes then the
 * number may go up and we will return the number of additional bytes that
 * must be reserved.  If it is going down we will return the number of bytes
 * that must be freed.  For example, if the adjustment grows the checksum
 * count from one leaf's worth to two leaves' worth, we return the metadata
 * size of the one extra leaf.
 *
 * This must be called with BTRFS_I(inode)->lock held.
 */
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
				   int reserve)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 csum_size;
	int num_csums_per_leaf;
	int num_csums;
	int old_csums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
	    BTRFS_I(inode)->csum_bytes == 0)
		return 0;

	old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	if (reserve)
		BTRFS_I(inode)->csum_bytes += num_bytes;
	else
		BTRFS_I(inode)->csum_bytes -= num_bytes;
	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
	num_csums_per_leaf = (int)div64_u64(csum_size,
					    sizeof(struct btrfs_csum_item) +
					    sizeof(struct btrfs_disk_key));
	num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	num_csums = num_csums + num_csums_per_leaf - 1;
	num_csums = num_csums / num_csums_per_leaf;

	old_csums = old_csums + num_csums_per_leaf - 1;
	old_csums = old_csums / num_csums_per_leaf;

	/* No change, no need to reserve more */
	if (old_csums == num_csums)
		return 0;

	if (reserve)
		return btrfs_calc_trans_metadata_size(root,
						      num_csums - old_csums);

	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
}
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
	u64 to_reserve = 0;
	u64 csum_bytes;
	unsigned nr_extents = 0;
	int extra_reserve = 0;
	int flush = 1;
	int ret;

	/* Need to be holding the i_mutex here if we aren't free space cache */
	if (btrfs_is_free_space_inode(inode))
		flush = 0;

	if (flush && btrfs_transaction_in_commit(root->fs_info))
		schedule_timeout(1);

	mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
	num_bytes = ALIGN(num_bytes, root->sectorsize);

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;

	if (BTRFS_I(inode)->outstanding_extents >
	    BTRFS_I(inode)->reserved_extents)
		nr_extents = BTRFS_I(inode)->outstanding_extents -
			BTRFS_I(inode)->reserved_extents;

	/*
	 * Add an item to reserve for updating the inode when we complete the
	 * delalloc io.
	 */
	if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
		      &BTRFS_I(inode)->runtime_flags)) {
		nr_extents++;
		extra_reserve = 1;
	}

	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
	csum_bytes = BTRFS_I(inode)->csum_bytes;
	spin_unlock(&BTRFS_I(inode)->lock);

	if (root->fs_info->quota_enabled) {
		ret = btrfs_qgroup_reserve(root, num_bytes +
					   nr_extents * root->leafsize);
		if (ret) {
			mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
			return ret;
		}
	}

	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
	if (ret) {
		u64 to_free = 0;
		unsigned dropped;

		spin_lock(&BTRFS_I(inode)->lock);
		dropped = drop_outstanding_extent(inode);
		/*
		 * If the inode's csum_bytes is the same as the original
		 * csum_bytes then we know we haven't raced with any free()ers
		 * so we can just reduce our inode's csum bytes and carry on.
		 * Otherwise we have to do the normal free thing to account for
		 * the case that the free side didn't free up its reserve
		 * because of this outstanding reservation.
		 */
		if (BTRFS_I(inode)->csum_bytes == csum_bytes)
			calc_csum_metadata_size(inode, num_bytes, 0);
		else
			to_free = calc_csum_metadata_size(inode, num_bytes, 0);
		spin_unlock(&BTRFS_I(inode)->lock);
		if (dropped)
			to_free += btrfs_calc_trans_metadata_size(root, dropped);

		if (to_free) {
			btrfs_block_rsv_release(root, block_rsv, to_free);
			trace_btrfs_space_reservation(root->fs_info,
						      "delalloc",
						      btrfs_ino(inode),
						      to_free, 0);
		}
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
		return ret;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	if (extra_reserve) {
		set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			&BTRFS_I(inode)->runtime_flags);
		nr_extents--;
	}
	BTRFS_I(inode)->reserved_extents += nr_extents;
	spin_unlock(&BTRFS_I(inode)->lock);
	mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);

	if (to_reserve)
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), to_reserve, 1);
	block_rsv_add_bytes(block_rsv, to_reserve, 1);

	return 0;
}
/**
 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
 * @inode: the inode to release the reservation for
 * @num_bytes: the number of bytes we're releasing
 *
 * This will release the metadata reservation for an inode.  This can be called
 * once we complete IO for a given set of bytes to release their metadata
 * reservations.
 */
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 to_free = 0;
	unsigned dropped;

	num_bytes = ALIGN(num_bytes, root->sectorsize);
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode);

	to_free = calc_csum_metadata_size(inode, num_bytes, 0);
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped > 0)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	trace_btrfs_space_reservation(root->fs_info, "delalloc",
				      btrfs_ino(inode), to_free, 0);
	if (root->fs_info->quota_enabled) {
		btrfs_qgroup_free(root, num_bytes +
					dropped * root->leafsize);
	}

	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
				to_free);
}
/**
 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
 * @inode: inode we're writing to
 * @num_bytes: the number of bytes we want to allocate
 *
 * This will do the following things
 *
 * o reserve space in the data space info for num_bytes
 * o reserve space in the metadata space info based on the number of
 *   outstanding extents and how many csums will be needed
 * o add to the inode's ->delalloc_bytes
 * o add it to the fs_info's delalloc inodes list.
 *
 * This will return 0 for success and -ENOSPC if there is no space left.
 */
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, num_bytes);
	if (ret)
		return ret;

	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
	if (ret) {
		btrfs_free_reserved_data_space(inode, num_bytes);
		return ret;
	}

	return 0;
}
/**
 * btrfs_delalloc_release_space - release data and metadata space for delalloc
 * @inode: inode we're releasing space for
 * @num_bytes: the number of bytes we want to free up
 *
 * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
 * called in the case that we don't need the metadata AND data reservations
 * anymore, for example after an error or after we insert an inline extent.
 *
 * This function will release the metadata space that was not used and will
 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
 * list if there are no delalloc bytes left.
 */
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
{
	btrfs_delalloc_release_metadata(inode, num_bytes);
	btrfs_free_reserved_data_space(inode, num_bytes);
}
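
/*
 * Adjust the allocation accounting when @num_bytes starting at @bytenr
 * are allocated (@alloc != 0) or freed: the superblock's bytes_used, the
 * owning block group item and the per-space_info counters all move in
 * lockstep.  Frees are pinned rather than returned directly, so the
 * space only becomes allocatable again once the transaction commits.
 */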
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;

	/* block accounting for super block */
	spin_lock(&info->delalloc_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -ENOENT;
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space.  This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
			cache_block_group(cache, trans, NULL, 1);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_test_opt(root, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		cache->dirty = 1;
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}
		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	struct btrfs_block_group_cache *cache;
	u64 bytenr;

	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->key.objectid;
	btrfs_put_block_group(cache);

	return bytenr;
}
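
/*
 * Account @num_bytes at @bytenr as pinned in both the block group and
 * its space_info, and mark the range dirty in the pinned_extents tree.
 * If @reserved is set the bytes were counted as reserved (an in-flight
 * allocation being undone), so the reserved counters are decremented to
 * match.  Pinned space is returned to the free space cache at commit
 * time by unpin_extent_range().
 */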
static int pin_down_extent(struct btrfs_root *root,
			   struct btrfs_block_group_cache *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	return 0;
}
/*
 * this function must be called within transaction
 */
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	BUG_ON(!cache); /* Logic error */

	pin_down_extent(root, cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 bytenr, u64 num_bytes)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	BUG_ON(!cache); /* Logic error */

	/*
	 * pull in the free space cache (if any) so that our pin
	 * removes the free space from the cache.  We have load_only set
	 * to one because the slow code to read in the free extents does check
	 * the pinned extents.
	 */
	cache_block_group(cache, trans, root, 1);

	pin_down_extent(root, cache, bytenr, num_bytes, 0);

	/* remove us from the free space cache (if we're there at all) */
	btrfs_remove_free_space(cache, bytenr, num_bytes);
	btrfs_put_block_group(cache);
	return 0;
}
/**
 * btrfs_update_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @num_bytes:	The number of bytes in question
 * @reserve:	One of the reservation enums
 *
 * This is called by the allocator when it reserves space, or by somebody who is
 * freeing space that was never actually used on disk.  For example if you
 * reserve some space for a new leaf in transaction A and before transaction A
 * commits you free that leaf, you call this with reserve set to 0 in order to
 * clear the reservation.
 *
 * Metadata reservations should be made with RESERVE_ALLOC so we do the proper
 * ENOSPC accounting.  For data we handle the reservation through clearing the
 * delalloc bits in the io_tree.  We have to do this since we could end up
 * allocating less disk space for the amount of data we have reserved in the
 * case of compression.
 *
 * If this is a reservation and the block group has become read only we cannot
 * make the reservation and return -EAGAIN, otherwise this function always
 * succeeds.
 */
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (reserve != RESERVE_FREE) {
		if (cache->ro) {
			ret = -EAGAIN;
		} else {
			cache->reserved += num_bytes;
			space_info->bytes_reserved += num_bytes;
			if (reserve == RESERVE_ALLOC) {
				trace_btrfs_space_reservation(cache->fs_info,
						"space_info", space_info->flags,
						num_bytes, 0);
				space_info->bytes_may_use -= num_bytes;
			}
		}
	} else {
		if (cache->ro)
			space_info->bytes_readonly += num_bytes;
		cache->reserved -= num_bytes;
		space_info->bytes_reserved -= num_bytes;
		space_info->reservation_progress++;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}
void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;

	down_write(&fs_info->extent_commit_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->extent_commit_sem);

	update_global_block_rsv(fs_info);
}
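
/*
 * Walk [start, end] one block group at a time, returning the unpinned
 * bytes to each group's free space cache (only up to last_byte_to_unpin,
 * so a still-running caching thread doesn't account the same space
 * twice) and dropping the pinned counters.  Read-only groups credit the
 * bytes to bytes_readonly instead.
 */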
static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	u64 len;

	while (start <= end) {
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache); /* Logic error */
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			btrfs_add_free_space(cache, start, len);
		}

		start += len;

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		cache->space_info->bytes_pinned -= len;
		if (cache->ro)
			cache->space_info->bytes_readonly += len;
		spin_unlock(&cache->lock);
		spin_unlock(&cache->space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;

	if (trans->aborted)
		return 0;

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		if (btrfs_test_opt(root, DISCARD))
			ret = btrfs_discard_extent(root, start,
						   end + 1 - start, NULL);

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		unpin_extent_range(root, start, end);
		cond_resched();
	}

	return 0;
}
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	ret = lookup_extent_backref(trans, extent_root, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
		if (!found_extent) {
			BUG_ON(iref);
			ret = remove_extent_backref(trans, extent_root, path,
						    NULL, refs_to_drop,
						    is_data);
			if (ret)
				goto abort;
			btrfs_release_path(path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret) {
				printk(KERN_ERR "umm, got %d back from search"
				       ", was looking for %llu\n", ret,
				       (unsigned long long)bytenr);
				if (ret > 0)
					btrfs_print_leaf(extent_root,
							 path->nodes[0]);
			}
			if (ret < 0)
				goto abort;
			extent_slot = path->slots[0];
		}
	} else if (ret == -ENOENT) {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
		       "parent %llu root %llu owner %llu offset %llu\n",
		       (unsigned long long)bytenr,
		       (unsigned long long)parent,
		       (unsigned long long)root_objectid,
		       (unsigned long long)owner_objectid,
		       (unsigned long long)owner_offset);
	} else {
		goto abort;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, extent_root, path,
					     owner_objectid, 0);
		if (ret < 0)
			goto abort;

		btrfs_release_path(path);
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
			printk(KERN_ERR "umm, got %d back from search"
			       ", was looking for %llu\n", ret,
			       (unsigned long long)bytenr);
			btrfs_print_leaf(extent_root, path->nodes[0]);
		}
		if (ret < 0)
			goto abort;
		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs < refs_to_drop);
	refs -= refs_to_drop;

	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
						    iref, refs_to_drop,
						    is_data);
			if (ret)
				goto abort;
		}
	} else {
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(root, path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		if (ret)
			goto abort;
		btrfs_release_path(path);

		if (is_data) {
			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
			if (ret)
				goto abort;
		}

		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
		if (ret)
			goto abort;
	}
out:
	btrfs_free_path(path);
	return ret;

abort:
	btrfs_abort_transaction(trans, extent_root, ret);
	goto out;
}
/*
 * when we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well.  This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	/* there are still entries for this ref, we can't drop it */
	if (ref->bytenr == bytenr)
		goto out;

	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
		kfree(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock.  If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries.  Go
	 * ahead and process it.
	 */
	head->node.in_tree = 0;
	rb_erase(&head->node.rb_node, &delayed_refs->root);

	delayed_refs->num_entries--;

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (list_empty(&head->cluster))
		delayed_refs->num_heads_ready--;

	list_del_init(&head->cluster);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref(&head->node);
	return ret;
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	struct btrfs_block_group_cache *cache = NULL;
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
					buf->start, buf->len,
					parent, root->root_key.objectid,
					btrfs_header_level(buf),
					BTRFS_DROP_DELAYED_REF, NULL, 0);
		BUG_ON(ret); /* -ENOMEM */
	}

	if (!last_ref)
		return;

	cache = btrfs_lookup_block_group(root->fs_info, buf->start);

	if (btrfs_header_generation(buf) == trans->transid) {
		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, root, buf->start);
			if (!ret)
				goto out;
		}

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(root, cache, buf->start, buf->len, 1);
			goto out;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
	}
out:
	/*
	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
	 * anymore.
	 */
	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
	btrfs_put_block_group(cache);
}
/* Can return -ENOMEM */
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
		      u64 owner, u64 offset, int for_cow)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(root, bytenr, num_bytes, 1);
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_DROP_DELAYED_REF, NULL, for_cow);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
						num_bytes,
						parent, root_objectid, owner,
						offset, BTRFS_DROP_DELAYED_REF,
						NULL, for_cow);
	}
	return ret;
}
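
/*
 * Round @val up to the next stripe boundary.  This assumes stripesize
 * is a power of two, as the mask arithmetic requires.
 */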
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}
/*
 * when we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once.  So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes.  Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
 */
static noinline int
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;
	DEFINE_WAIT(wait);

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	put_caching_control(caching_ctl);
	return 0;
}

static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;
	DEFINE_WAIT(wait);

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache));

	put_caching_control(caching_ctl);
	return 0;
}
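
/*
 * Map a block group's RAID flags onto the index used for
 * space_info->block_groups[]: RAID10 first, then RAID1, DUP, RAID0 and
 * finally single.  The allocator walks the lists in this order, so more
 * redundant profiles are tried first when several exist.
 */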
static int __get_block_group_index(u64 flags)
{
	int index;

	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		index = 0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		index = 1;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		index = 2;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		index = 3;
	else
		index = 4;

	return index;
}

static int get_block_group_index(struct btrfs_block_group_cache *cache)
{
	return __get_block_group_index(cache->flags);
}

enum btrfs_loop_type {
	LOOP_CACHING_NOWAIT = 0,
	LOOP_CACHING_WAIT = 1,
	LOOP_ALLOC_CHUNK = 2,
	LOOP_NO_EMPTY_SIZE = 3,
};
/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static noinline int find_free_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 hint_byte, struct btrfs_key *ins,
				     u64 data)
{
	int ret = 0;
	struct btrfs_root *root = orig_root->fs_info->extent_root;
	struct btrfs_free_cluster *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group = NULL;
	struct btrfs_block_group_cache *used_block_group;
	u64 search_start = 0;
	int empty_cluster = 2 * 1024 * 1024;
	int allowed_chunk_alloc = 0;
	int done_chunk_alloc = 0;
	struct btrfs_space_info *space_info;
	int loop = 0;
	int index = 0;
	int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
	bool found_uncached_bg = false;
	bool failed_cluster_refill = false;
	bool failed_alloc = false;
	bool use_cluster = true;
	bool have_caching_bg = false;

	WARN_ON(num_bytes < root->sectorsize);
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
	ins->objectid = 0;
	ins->offset = 0;

	trace_find_free_extent(orig_root, num_bytes, empty_size, data);

	space_info = __find_space_info(root->fs_info, data);
	if (!space_info) {
		printk(KERN_ERR "No space info for %llu\n", data);
		return -ENOSPC;
	}

	/*
	 * If the space info is for both data and metadata it means we have a
	 * small filesystem and we can't use the clustering stuff.
	 */
	if (btrfs_mixed_space_info(space_info))
		use_cluster = false;

	if (orig_root->ref_cows || empty_size)
		allowed_chunk_alloc = 1;

	if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
		last_ptr = &root->fs_info->meta_alloc_cluster;
		if (!btrfs_test_opt(root, SSD))
			empty_cluster = 64 * 1024;
	}

	if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
	    btrfs_test_opt(root, SSD)) {
		last_ptr = &root->fs_info->data_alloc_cluster;
	}

	if (last_ptr) {
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		spin_unlock(&last_ptr->lock);
	}

	search_start = max(search_start, first_logical_byte(root, 0));
	search_start = max(search_start, hint_byte);

	if (!last_ptr)
		empty_cluster = 0;

	if (search_start == hint_byte) {
		block_group = btrfs_lookup_block_group(root->fs_info,
						       search_start);
		used_block_group = block_group;
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if it's not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, data) &&
		    block_group->cached != BTRFS_CACHE_NO) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				index = get_block_group_index(block_group);
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}
search:
	have_caching_bg = false;
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group, &space_info->block_groups[index],
			    list) {
		u64 offset;
		int cached;

		used_block_group = block_group;
		btrfs_get_block_group(block_group);
		search_start = block_group->key.objectid;

		/*
		 * this can happen if we end up cycling through all the
		 * raid types, but we want to make sure we only allocate
		 * for the proper type.
		 */
		if (!block_group_bits(block_group, data)) {
			u64 extra = BTRFS_BLOCK_GROUP_DUP |
				BTRFS_BLOCK_GROUP_RAID1 |
				BTRFS_BLOCK_GROUP_RAID10;

			/*
			 * if they asked for extra copies and this block group
			 * doesn't provide them, bail.  This does allow us to
			 * fill raid0 from raid1.
			 */
			if ((data & extra) && !(block_group->flags & extra))
				goto loop;
		}

have_block_group:
		cached = block_group_cache_done(block_group);
		if (unlikely(!cached)) {
			found_uncached_bg = true;
			ret = cache_block_group(block_group, trans,
						orig_root, 0);
			BUG_ON(ret < 0);
			ret = 0;
		}

		if (unlikely(block_group->ro))
			goto loop;

		/*
		 * Ok we want to try and use the cluster allocator, so
		 * let's look there
		 */
		if (last_ptr) {
			/*
			 * the refill lock keeps out other
			 * people trying to start a new cluster
			 */
			spin_lock(&last_ptr->refill_lock);
			used_block_group = last_ptr->block_group;
			if (used_block_group != block_group &&
			    (!used_block_group ||
			     used_block_group->ro ||
			     !block_group_bits(used_block_group, data))) {
				used_block_group = block_group;
				goto refill_cluster;
			}

			if (used_block_group != block_group)
				btrfs_get_block_group(used_block_group);

			offset = btrfs_alloc_from_cluster(used_block_group,
			  last_ptr, num_bytes, used_block_group->key.objectid);
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
				trace_btrfs_reserve_extent_cluster(root,
					block_group, search_start, num_bytes);
				goto checks;
			}

			WARN_ON(last_ptr->block_group != used_block_group);
			if (used_block_group != block_group) {
				btrfs_put_block_group(used_block_group);
				used_block_group = block_group;
			}
refill_cluster:
			BUG_ON(used_block_group != block_group);
			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
			 * set up a new cluster, so let's just skip it
			 * and let the allocator find whatever block
			 * it can find.  If we reach this point, we
			 * will have tried the cluster allocator
			 * plenty of times and not have found
			 * anything, so we are likely way too
			 * fragmented for the clustering stuff to find
			 * anything.
			 *
			 * However, if the cluster is taken from the
			 * current block group, release the cluster
			 * first, so that we stand a better chance of
			 * succeeding in the unclustered
			 * allocation.  */
			if (loop >= LOOP_NO_EMPTY_SIZE &&
			    last_ptr->block_group != block_group) {
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}

			/*
			 * this cluster didn't work out, free it and
			 * start over
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);

			if (loop >= LOOP_NO_EMPTY_SIZE) {
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}
			/* allocate a cluster in this block group */
			ret = btrfs_find_space_cluster(trans, root,
					       block_group, last_ptr,
					       search_start, num_bytes,
					       empty_cluster + empty_size);
			if (ret == 0) {
				/*
				 * now pull our allocation out of this
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
						  last_ptr, num_bytes,
						  search_start);
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
					trace_btrfs_reserve_extent_cluster(root,
						block_group, search_start,
						num_bytes);
					goto checks;
				}
			} else if (!cached && loop > LOOP_CACHING_NOWAIT
				   && !failed_cluster_refill) {
				spin_unlock(&last_ptr->refill_lock);

				failed_cluster_refill = true;
				wait_block_group_cache_progress(block_group,
				       num_bytes + empty_cluster + empty_size);
				goto have_block_group;
			}

			/*
			 * at this point we either didn't find a cluster
			 * or we weren't able to allocate a block from our
			 * cluster.  Free the cluster we've been trying
			 * to use, and go to the next block group
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);
			spin_unlock(&last_ptr->refill_lock);
			goto loop;
		}

unclustered_alloc:
		spin_lock(&block_group->free_space_ctl->tree_lock);
		if (cached &&
		    block_group->free_space_ctl->free_space <
		    num_bytes + empty_cluster + empty_size) {
			spin_unlock(&block_group->free_space_ctl->tree_lock);
			goto loop;
		}
		spin_unlock(&block_group->free_space_ctl->tree_lock);

		offset = btrfs_find_space_for_alloc(block_group, search_start,
						    num_bytes, empty_size);
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle of
		 * caching and we are ok with waiting, then go ahead and wait
		 * for progress to be made, and set failed_alloc to true.
		 *
		 * If failed_alloc is true then we've already waited on this
		 * block group once and should move on to the next block group.
		 */
		if (!offset && !failed_alloc && !cached &&
		    loop > LOOP_CACHING_NOWAIT) {
			wait_block_group_cache_progress(block_group,
						num_bytes + empty_size);
			failed_alloc = true;
			goto have_block_group;
		} else if (!offset) {
			if (!cached)
				have_caching_bg = true;
			goto loop;
		}
checks:
		search_start = stripe_align(root, offset);

		/* move on to the next group */
		if (search_start + num_bytes >
		    used_block_group->key.objectid + used_block_group->key.offset) {
			btrfs_add_free_space(used_block_group, offset, num_bytes);
			goto loop;
		}

		if (offset < search_start)
			btrfs_add_free_space(used_block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);

		ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
						  alloc_type);
		if (ret == -EAGAIN) {
			btrfs_add_free_space(used_block_group, offset, num_bytes);
			goto loop;
		}

		/* we are all good, let's return */
		ins->objectid = search_start;
		ins->offset = num_bytes;

		trace_btrfs_reserve_extent(orig_root, block_group,
					   search_start, num_bytes);
		if (offset < search_start)
			btrfs_add_free_space(used_block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);
		if (used_block_group != block_group)
			btrfs_put_block_group(used_block_group);
		btrfs_put_block_group(block_group);
		break;
loop:
		failed_cluster_refill = false;
		failed_alloc = false;
		BUG_ON(index != get_block_group_index(block_group));
		if (used_block_group != block_group)
			btrfs_put_block_group(used_block_group);
		btrfs_put_block_group(block_group);
	}
	up_read(&space_info->groups_sem);

	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
		goto search;

	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
		goto search;

	/*
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *			again
	 */
	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
		index = 0;
		loop++;
		if (loop == LOOP_ALLOC_CHUNK) {
			if (allowed_chunk_alloc) {
				ret = do_chunk_alloc(trans, root, num_bytes +
						     2 * 1024 * 1024, data,
						     CHUNK_ALLOC_LIMITED);
				/*
				 * Do not bail out on ENOSPC since we
				 * can do more things.
				 */
				if (ret < 0 && ret != -ENOSPC) {
					btrfs_abort_transaction(trans,
								root, ret);
					goto out;
				}
				allowed_chunk_alloc = 0;
				if (ret == 1)
					done_chunk_alloc = 1;
			} else if (!done_chunk_alloc &&
				   space_info->force_alloc ==
				   CHUNK_ALLOC_NO_FORCE) {
				space_info->force_alloc = CHUNK_ALLOC_LIMITED;
			}

			/*
			 * We didn't allocate a chunk, go ahead and drop the
			 * empty size and loop again.
			 */
			if (!done_chunk_alloc)
				loop = LOOP_NO_EMPTY_SIZE;
		}

		if (loop == LOOP_NO_EMPTY_SIZE) {
			empty_size = 0;
			empty_cluster = 0;
		}

		goto search;
	} else if (!ins->objectid) {
		ret = -ENOSPC;
	} else if (ins->objectid) {
		ret = 0;
	}
out:
	return ret;
}
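
/*
 * Debugging aid for ENOSPC: print a space_info's counters and, when
 * @dump_block_groups is set, every block group in it (one pass per RAID
 * index) along with its free space entries.
 */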
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups)
{
	struct btrfs_block_group_cache *cache;
	int index = 0;

	spin_lock(&info->lock);
	printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
	       (unsigned long long)info->flags,
	       (unsigned long long)(info->total_bytes - info->bytes_used -
				    info->bytes_pinned - info->bytes_reserved -
				    info->bytes_readonly),
	       (info->full) ? "" : "not ");
	printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
	       (unsigned long long)info->total_bytes,
	       (unsigned long long)info->bytes_used,
	       (unsigned long long)info->bytes_pinned,
	       (unsigned long long)info->bytes_reserved,
	       (unsigned long long)info->bytes_may_use,
	       (unsigned long long)info->bytes_readonly);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
		       (unsigned long long)cache->key.objectid,
		       (unsigned long long)cache->key.offset,
		       (unsigned long long)btrfs_block_group_used(&cache->item),
		       (unsigned long long)cache->pinned,
		       (unsigned long long)cache->reserved,
		       cache->ro ? "[readonly]" : "");
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 struct btrfs_key *ins, u64 data)
{
	bool final_tried = false;
	int ret;

	data = btrfs_get_alloc_profile(root, data);
again:
	/*
	 * the only place that sets empty_size is btrfs_realloc_node, which
	 * is not called recursively on allocations
	 */
	if (empty_size || root->ref_cows) {
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     num_bytes + 2 * 1024 * 1024, data,
				     CHUNK_ALLOC_NO_FORCE);
		if (ret < 0 && ret != -ENOSPC) {
			btrfs_abort_transaction(trans, root, ret);
			return ret;
		}
	}

	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(trans, root, num_bytes, empty_size,
			       hint_byte, ins, data);

	if (ret == -ENOSPC) {
		if (!final_tried) {
			num_bytes = num_bytes >> 1;
			num_bytes = num_bytes & ~(root->sectorsize - 1);
			num_bytes = max(num_bytes, min_alloc_size);
			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     num_bytes, data, CHUNK_ALLOC_FORCE);
			if (ret < 0 && ret != -ENOSPC) {
				btrfs_abort_transaction(trans, root, ret);
				return ret;
			}
			if (num_bytes == min_alloc_size)
				final_tried = true;
			goto again;
		} else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
			struct btrfs_space_info *sinfo;

			sinfo = __find_space_info(root->fs_info, data);
			printk(KERN_ERR "btrfs allocation failed flags %llu, "
			       "wanted %llu\n", (unsigned long long)data,
			       (unsigned long long)num_bytes);
			if (sinfo)
				dump_space_info(sinfo, num_bytes, 1);
		}
	}

	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);

	return ret;
}
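
/*
 * Hand back an extent that was reserved but is no longer wanted.  With
 * @pin set the range stays pinned until the transaction commits;
 * otherwise it goes straight back to the free space cache and the
 * reservation is dropped via RESERVE_FREE.  The DISCARD mount option
 * additionally trims the range either way.
 */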
static int __btrfs_free_reserved_extent(struct btrfs_root *root,
					u64 start, u64 len, int pin)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
		printk(KERN_ERR "Unable to find block group for %llu\n",
		       (unsigned long long)start);
		return -ENOSPC;
	}

	if (btrfs_test_opt(root, DISCARD))
		ret = btrfs_discard_extent(root, start, len, NULL);

	if (pin)
		pin_down_extent(root, cache, start, len, 1);
	else {
		btrfs_add_free_space(cache, start, len);
		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
	}
	btrfs_put_block_group(cache);

	trace_btrfs_reserved_extent_free(root, start, len);

	return ret;
}

int btrfs_free_reserved_extent(struct btrfs_root *root,
			       u64 start, u64 len)
{
	return __btrfs_free_reserved_extent(root, start, len, 0);
}

int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
				       u64 start, u64 len)
{
	return __btrfs_free_reserved_extent(root, start, len, 1);
}
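
/*
 * Insert the extent item for a newly allocated data extent, with its
 * first backref stored inline: a shared data ref when @parent is set
 * (the extent is referenced through a shared tree block), otherwise a
 * keyed extent data ref tied to @root_objectid/@owner/@offset.
 */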
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;

	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
	if (ret) { /* -ENOENT, logic error */
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
	return ret;
}
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
	block_info = (struct btrfs_tree_block_info *)(extent_item + 1);

	btrfs_set_tree_block_key(leaf, block_info, key);
	btrfs_set_tree_block_level(leaf, block_info, level);

	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	if (parent > 0) {
		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
	if (ret) { /* -ENOENT, logic error */
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
	return ret;
}
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 root_objectid, u64 owner,
				     u64 offset, struct btrfs_key *ins)
{
	int ret;

	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);

	ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
					 ins->offset, 0,
					 root_objectid, owner, offset,
					 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
	return ret;
}
/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;
	u64 start = ins->objectid;
	u64 num_bytes = ins->offset;

	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
	cache_block_group(block_group, trans, NULL, 0);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
		BUG_ON(ret); /* -ENOMEM */
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = add_excluded_extent(root, start, num_bytes);
			BUG_ON(ret); /* -ENOMEM */
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			BUG_ON(ret); /* -ENOMEM */
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			BUG_ON(ret); /* -ENOMEM */

			start = caching_ctl->progress;
			num_bytes = ins->objectid + ins->offset -
				    caching_ctl->progress;
			ret = add_excluded_extent(root, start, num_bytes);
			BUG_ON(ret); /* -ENOMEM */
		}

		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}

	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
					  RESERVE_ALLOC_NO_ACCOUNT);
	BUG_ON(ret); /* logic error */
	btrfs_put_block_group(block_group);
	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
					 0, owner, offset, ins, 1);
	return ret;
}
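/*
 * Illustrative sketch, not part of the original file: how the straddling
 * case above splits a logged extent [start, start + num_bytes) around
 * caching_ctl->progress.  The already-scanned head is removed from the
 * free space cache, the unscanned tail is excluded so the caching thread
 * skips it.  Assumes start < progress < start + num_bytes; the helper
 * name and out-parameters are hypothetical.
 */
static inline void example_split_logged_extent(u64 start, u64 num_bytes,
					       u64 progress,
					       u64 *remove_len,
					       u64 *exclude_start,
					       u64 *exclude_len)
{
	*remove_len = progress - start;		/* already scanned */
	*exclude_start = progress;		/* not scanned yet */
	*exclude_len = start + num_bytes - progress;
}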
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u64 bytenr, u32 blocksize,
					    int level)
{
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root, buf);
	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);

	btrfs_set_lock_blocking(buf);
	btrfs_set_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		/*
		 * we allow two log transactions at a time, use different
		 * EXTENT bit to differentiate dirty pages.
		 */
		if (root->log_transid % 2 == 0)
			set_extent_dirty(&root->dirty_log_pages, buf->start,
					 buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
				       buf->start + buf->len - 1, GFP_NOFS);
	} else {
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->blocks_used++;
	/* this returns a buffer locked for blocking */
	return buf;
}
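/*
 * Illustrative sketch, not part of the original file: the parity trick
 * used above.  Buffers born in an even log transaction are tracked with
 * EXTENT_DIRTY, buffers from an odd one with EXTENT_NEW, so two log
 * transactions can be in flight without mixing their dirty pages.
 * Hypothetical helper name.
 */
static inline int example_log_buffer_uses_new_bit(u64 log_transid)
{
	return log_transid % 2 != 0;
}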
static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
	      struct btrfs_root *root, u32 blocksize)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	int ret;

	block_rsv = get_block_rsv(trans, root);

	if (block_rsv->size == 0) {
		ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
		/*
		 * If we couldn't reserve metadata bytes try and use some from
		 * the global reserve.
		 */
		if (ret && block_rsv != global_rsv) {
			ret = block_rsv_use_bytes(global_rsv, blocksize);
			if (!ret)
				return global_rsv;
			return ERR_PTR(ret);
		} else if (ret) {
			return ERR_PTR(ret);
		}
		return block_rsv;
	}

	ret = block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;
	if (ret && !block_rsv->failfast) {
		static DEFINE_RATELIMIT_STATE(_rs,
				DEFAULT_RATELIMIT_INTERVAL,
				/*DEFAULT_RATELIMIT_BURST*/ 2);
		if (__ratelimit(&_rs)) {
			printk(KERN_DEBUG "btrfs: block rsv returned %d\n", ret);
			WARN_ON(1);
		}
		ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
		if (!ret) {
			return block_rsv;
		} else if (ret && block_rsv != global_rsv) {
			ret = block_rsv_use_bytes(global_rsv, blocksize);
			if (!ret)
				return global_rsv;
		}
	}

	return ERR_PTR(-ENOSPC);
}

static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
	block_rsv_add_bytes(block_rsv, blocksize, 0);
	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
}
/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns the tree buffer or NULL.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u32 blocksize,
					u64 parent, u64 root_objectid,
					struct btrfs_disk_key *key, int level,
					u64 hint, u64 empty_size)
{
	struct btrfs_key ins;
	struct btrfs_block_rsv *block_rsv;
	struct extent_buffer *buf;
	u64 flags = 0;
	int ret;

	block_rsv = use_block_rsv(trans, root, blocksize);
	if (IS_ERR(block_rsv))
		return ERR_CAST(block_rsv);

	ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
				   empty_size, hint, &ins, 0);
	if (ret) {
		unuse_block_rsv(root->fs_info, block_rsv, blocksize);
		return ERR_PTR(ret);
	}

	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
				    blocksize, level);
	BUG_ON(IS_ERR(buf)); /* -ENOMEM */

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins.objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		struct btrfs_delayed_extent_op *extent_op;
		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
		BUG_ON(!extent_op); /* -ENOMEM */
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		extent_op->update_key = 1;
		extent_op->update_flags = 1;
		extent_op->is_data = 0;

		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
					ins.objectid,
					ins.offset, parent, root_objectid,
					level, BTRFS_ADD_DELAYED_EXTENT,
					extent_op, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	return buf;
}
struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
	int reada_slot;
	int reada_count;
	int for_reloc;
};

#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2
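/*
 * Illustrative sketch, not part of the original file: the minimal
 * walk_control setup for a plain DROP_REFERENCE walk, mirroring what
 * btrfs_drop_snapshot() does further down.  'nodeptrs' stands in for
 * BTRFS_NODEPTRS_PER_BLOCK(root); the helper name is hypothetical.
 */
static inline void example_init_drop_walk(struct walk_control *wc,
					  int level, int nodeptrs)
{
	memset(wc, 0, sizeof(*wc));
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->reada_count = nodeptrs;
}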
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u32 nritems;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(root));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);
	blocksize = btrfs_level_size(root, wc->level - 1);

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
					       &refs, &flags);
		/* We don't care about errors in readahead. */
		if (ret < 0)
			continue;
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		ret = readahead_tree_block(root, bytenr, blocksize,
					   generation);
		if (ret)
			break;
		nread++;
	}
	wc->reada_slot = slot;
}
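/*
 * Illustrative sketch, not part of the original file: the adaptive
 * readahead window computed at the top of reada_walk_down().  The count
 * shrinks to 2/3 (never below 2) when the walk re-enters a node behind
 * the last readahead slot, and grows by 3/2 (capped at the number of
 * node pointers per block) when it keeps moving forward.  'behind' and
 * 'cap' are hypothetical stand-ins.
 */
static inline int example_next_reada_count(int count, int behind, int cap)
{
	if (behind)
		return max(count * 2 / 3, 2);
	return min(count * 3 / 2, cap);
}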
/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when reference count of tree block is 1, it won't increase
	 * again. once full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, root,
					       eb->start, eb->len,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(ret == -ENOMEM);
		if (ret)
			return ret;
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock_rw(eb, path->locks[level]);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
						  eb->len, flag, 0);
		BUG_ON(ret); /* -ENOMEM */
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock_rw(eb, path->locks[level]);
		path->locks[level] = 0;
	}
	return 0;
}
/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to. if the block
 * is shared and we need update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update back refs, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	u64 bytenr;
	u64 generation;
	u64 parent;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;

	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * if the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}

	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
	blocksize = btrfs_level_size(root, level - 1);

	next = btrfs_find_tree_block(root, bytenr, blocksize);
	if (!next) {
		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
		if (!next)
			return -ENOMEM;
		reada = 1;
	}
	btrfs_tree_lock(next);
	btrfs_set_lock_blocking(next);

	ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1]);
	if (ret < 0) {
		btrfs_tree_unlock(next);
		return ret;
	}

	BUG_ON(wc->refs[level - 1] == 0);
	*lookup_info = 0;

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;

			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;

			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;

			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}

	if (!btrfs_buffer_uptodate(next, generation, 0)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}

	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(root, bytenr, blocksize, generation);
		if (!next)
			return -EIO;
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);
	}

	level--;
	BUG_ON(level != btrfs_header_level(next));
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level]));
			parent = 0;
		}

		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
					root->root_key.objectid, level - 1, 0, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	btrfs_tree_unlock(next);
	free_extent_buffer(next);
	*lookup_info = 1;
	return 1;
}
/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, root,
						       eb->start, eb->len,
						       &wc->refs[level],
						       &wc->flags[level]);
			if (ret < 0) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				return ret;
			}
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1,
						    wc->for_reloc);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0,
						    wc->for_reloc);
			BUG_ON(ret); /* -ENOMEM */
		}
		/* make block locked assertion in clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		}
		clean_tree_block(trans, root, eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(eb));
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level + 1]));
	}

	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;
}
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret > 0)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			return ret;
		level = wc->level;
	}
	return 0;
}
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;

			if (path->locks[level]) {
				btrfs_tree_unlock_rw(path->nodes[level],
						     path->locks[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}
/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are only
 * referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 */
int btrfs_drop_snapshot(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, int update_ref,
			int for_reloc)
{
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		err = -ENOMEM;
		goto out;
	}

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	if (block_rsv)
		trans->block_rsv = block_rsv;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(path->nodes[level]);
		path->slots[level] = 0;
		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out_end_trans;
		}
		WARN_ON(ret > 0);

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			btrfs_set_lock_blocking(path->nodes[level]);

			ret = btrfs_lookup_extent_info(trans, root,
						path->nodes[level]->start,
						path->nodes[level]->len,
						&wc->refs[level],
						&wc->flags[level]);
			if (ret < 0) {
				err = ret;
				goto out_end_trans;
			}
			BUG_ON(wc->refs[level] == 0);

			if (level == root_item->drop_level)
				break;

			btrfs_tree_unlock(path->nodes[level]);
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->for_reloc = for_reloc;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			level = wc->level;
			btrfs_node_key(path->nodes[level],
				       &root_item->drop_progress,
				       path->slots[level]);
			root_item->drop_level = level;
		}

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans, tree_root)) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			if (ret) {
				btrfs_abort_transaction(trans, tree_root, ret);
				err = ret;
				goto out_end_trans;
			}

			btrfs_end_transaction_throttle(trans, tree_root);
			trans = btrfs_start_transaction(tree_root, 0);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				goto out_free;
			}
			if (block_rsv)
				trans->block_rsv = block_rsv;
		}
	}
	btrfs_release_path(path);
	if (err)
		goto out_end_trans;

	ret = btrfs_del_root(trans, tree_root, &root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, tree_root, ret);
		goto out_end_trans;
	}

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
					   NULL, NULL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, tree_root, ret);
			err = ret;
			goto out_end_trans;
		} else if (ret > 0) {
			/* if we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	if (root->in_radix) {
		btrfs_free_fs_root(tree_root->fs_info, root);
	} else {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		kfree(root);
	}
out_end_trans:
	btrfs_end_transaction_throttle(trans, tree_root);
out_free:
	kfree(wc);
	btrfs_free_path(path);
out:
	if (err)
		btrfs_std_error(root->fs_info, err);
	return err;
}
/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'
 * only used by relocation code
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct extent_buffer *node,
			struct extent_buffer *parent)
{
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_locked(parent);
	parent_level = btrfs_header_level(parent);
	extent_buffer_get(parent);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->for_reloc = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped;

	/*
	 * if restripe for this chunk_type is on pick target profile and
	 * return, otherwise do the usual balance
	 */
	stripped = get_restripe_target(root->fs_info, flags);
	if (stripped)
		return extended_to_chunk(stripped);

	/*
	 * we add in the count of missing devices because we want
	 * to make sure that any RAID levels on a degraded FS
	 * continue to be honored.
	 */
	num_devices = root->fs_info->fs_devices->rw_devices +
		root->fs_info->fs_devices->missing_devices;

	stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* this is drive concat, leave it alone */
	}

	return flags;
}
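/*
 * Illustrative sketch, not part of the original file: the single-device
 * branch of update_block_group_flags() restated as a pure function over
 * the profile bits.  raid0 degrades to single, raid1/raid10 degrade to
 * dup; anything else is left alone.  Hypothetical helper name.
 */
static inline u64 example_single_device_profile(u64 flags)
{
	u64 raid = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
		   BTRFS_BLOCK_GROUP_RAID10 | BTRFS_BLOCK_GROUP_DUP;
	u64 stripped = flags & ~raid;

	if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return stripped;			 /* raid0 -> single */
	if (flags & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
		return stripped | BTRFS_BLOCK_GROUP_DUP; /* mirror -> dup */
	return flags;
}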
static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	u64 min_allocable_bytes;
	int ret = -ENOSPC;

	/*
	 * We need some metadata space and system metadata space for
	 * allocating chunks in some corner cases until we force to set
	 * it to be readonly.
	 */
	if ((sinfo->flags &
	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
	    !force)
		min_allocable_bytes = 1 * 1024 * 1024;
	else
		min_allocable_bytes = 0;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->ro) {
		ret = 0;
		goto out;
	}

	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);

	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
	    min_allocable_bytes <= sinfo->total_bytes) {
		sinfo->bytes_readonly += num_bytes;
		cache->ro = 1;
		ret = 0;
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return ret;
}
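/*
 * Illustrative sketch, not part of the original file: the admission
 * test used by set_block_group_ro() above.  Marking a group read-only
 * moves its unused bytes into bytes_readonly, which is only allowed if
 * everything already accounted plus those bytes (plus the corner-case
 * reserve) still fits in the space_info.  Names are hypothetical.
 */
static inline int example_can_set_ro(u64 accounted_bytes, u64 unused_in_group,
				     u64 min_allocable, u64 total_bytes)
{
	return accounted_bytes + unused_in_group + min_allocable <=
	       total_bytes;
}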
int btrfs_set_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

	BUG_ON(cache->ro);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	alloc_flags = update_block_group_flags(root, cache->flags);
	if (alloc_flags != cache->flags) {
		ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
				     CHUNK_ALLOC_FORCE);
		if (ret < 0)
			goto out;
	}

	ret = set_block_group_ro(cache, 0);
	if (!ret)
		goto out;
	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
	ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
			     CHUNK_ALLOC_FORCE);
	if (ret < 0)
		goto out;
	ret = set_block_group_ro(cache, 0);
out:
	btrfs_end_transaction(trans, root);
	return ret;
}
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 type)
{
	u64 alloc_flags = get_alloc_profile(root, type);
	return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
			      CHUNK_ALLOC_FORCE);
}
/*
 * helper to account the unused space of all the readonly block groups in the
 * list. takes mirrors into account.
 */
static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
{
	struct btrfs_block_group_cache *block_group;
	u64 free_bytes = 0;
	int factor;

	list_for_each_entry(block_group, groups_list, list) {
		spin_lock(&block_group->lock);

		if (!block_group->ro) {
			spin_unlock(&block_group->lock);
			continue;
		}

		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
					  BTRFS_BLOCK_GROUP_RAID10 |
					  BTRFS_BLOCK_GROUP_DUP))
			factor = 2;
		else
			factor = 1;

		free_bytes += (block_group->key.offset -
			       btrfs_block_group_used(&block_group->item)) *
			       factor;

		spin_unlock(&block_group->lock);
	}

	return free_bytes;
}
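/*
 * Illustrative sketch, not part of the original file: the mirror-factor
 * arithmetic above.  A read-only RAID1/RAID10/DUP block group stores
 * every byte twice, so its unused space frees twice as many raw bytes
 * on the devices.  Hypothetical helper name.
 */
static inline u64 example_ro_free_bytes(u64 group_size, u64 used, int mirrored)
{
	return (group_size - used) * (mirrored ? 2 : 1);
}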
/*
 * helper to account the unused space of all the readonly block groups in the
 * space_info. takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	int i;
	u64 free_bytes = 0;

	spin_lock(&sinfo->lock);

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		if (!list_empty(&sinfo->block_groups[i]))
			free_bytes += __btrfs_get_ro_block_group_free_space(
						&sinfo->block_groups[i]);

	spin_unlock(&sinfo->lock);

	return free_bytes;
}
void btrfs_set_block_group_rw(struct btrfs_root *root,
			      struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);
	sinfo->bytes_readonly -= num_bytes;
	cache->ro = 0;
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
}
/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
 * ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	u64 min_free;
	u64 dev_min = 1;
	u64 dev_nr = 0;
	u64 target;
	int index;
	int full = 0;
	int ret = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);

	/* odd, couldn't find the block group, leave it alone */
	if (!block_group)
		return -1;

	min_free = btrfs_block_group_used(&block_group->item);

	/* no bytes used, we're good */
	if (!min_free)
		goto out;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);

	full = space_info->full;

	/*
	 * if this is the last block group we have in this space, we can't
	 * relocate it unless we're able to allocate a new chunk below.
	 *
	 * Otherwise, we need to make sure we have room in the space to handle
	 * all of the extents from this block group.  If we can, we're good
	 */
	if ((space_info->total_bytes != block_group->key.offset) &&
	    (space_info->bytes_used + space_info->bytes_reserved +
	     space_info->bytes_pinned + space_info->bytes_readonly +
	     min_free < space_info->total_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * ok we don't have enough space, but maybe we have free space on our
	 * devices to allocate new chunks for relocation, so loop through our
	 * alloc devices and guess if we have enough space.  if this block
	 * group is going to be restriped, run checks against the target
	 * profile instead of the current one.
	 */
	ret = -1;

	/*
	 * index:
	 *      0: raid10
	 *      1: raid1
	 *      2: dup
	 *      3: raid0
	 *      4: single
	 */
	target = get_restripe_target(root->fs_info, block_group->flags);
	if (target) {
		index = __get_block_group_index(extended_to_chunk(target));
	} else {
		/*
		 * this is just a balance, so if we were marked as full
		 * we know there is no space for a new chunk
		 */
		if (full)
			goto out;

		index = get_block_group_index(block_group);
	}

	if (index == 0) {
		dev_min = 4;
		/* Divide by 2 */
		min_free >>= 1;
	} else if (index == 1) {
		dev_min = 2;
	} else if (index == 2) {
		/* Multiply by 2 */
		min_free <<= 1;
	} else if (index == 3) {
		dev_min = fs_devices->rw_devices;
		do_div(min_free, dev_min);
	}

	mutex_lock(&root->fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 dev_offset;

		/*
		 * check to make sure we can actually find a chunk with enough
		 * space to fit our block group in.
		 */
		if (device->total_bytes > device->bytes_used + min_free) {
			ret = find_free_dev_extent(device, min_free,
						   &dev_offset, NULL);
			if (!ret)
				dev_nr++;

			if (dev_nr >= dev_min)
				break;

			ret = -1;
		}
	}
	mutex_unlock(&root->fs_info->chunk_mutex);
out:
	btrfs_put_block_group(block_group);
	return ret;
}
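/*
 * Illustrative sketch, not part of the original file: the per-profile
 * scaling btrfs_can_relocate() applies before probing devices, pulled
 * out into one helper.  Index mapping as in the comment above:
 * 0 raid10, 1 raid1, 2 dup, 3 raid0, 4 single.  Names are hypothetical.
 */
static inline void example_scale_relocation_target(int index, u64 rw_devices,
						   u64 *min_free, u64 *dev_min)
{
	*dev_min = 1;
	if (index == 0) {		/* raid10: half per stripe, 4 devices */
		*dev_min = 4;
		*min_free >>= 1;
	} else if (index == 1) {	/* raid1: two full copies */
		*dev_min = 2;
	} else if (index == 2) {	/* dup: two copies on one device */
		*min_free <<= 1;
	} else if (index == 3) {	/* raid0: spread across all rw devices */
		*dev_min = rw_devices;
		do_div(*min_free, *dev_min);
	}
}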
static int find_first_block_group(struct btrfs_root *root,
		struct btrfs_path *path, struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = next_block_group(info->tree_root,
						       block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		iput(inode);
		last = block_group->key.objectid + block_group->key.offset;
		btrfs_put_block_group(block_group);
	}
}
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->extent_commit_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		put_caching_control(caching_ctl);
	}
	up_write(&info->extent_commit_sem);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		if (block_group->cached == BTRFS_CACHE_STARTED)
			wait_block_group_cache_done(block_group);

		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO)
			free_excluded_extents(info->extent_root, block_group);

		btrfs_remove_free_space_cache(block_group);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/* now that all the block groups are freed, go through and
	 * free all the space_info structs.  This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them.  We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */
	synchronize_rcu();

	release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);
		if (space_info->bytes_pinned > 0 ||
		    space_info->bytes_reserved > 0 ||
		    space_info->bytes_may_use > 0) {
			WARN_ON(1);
			dump_space_info(space_info, 0, 0);
		}
		list_del(&space_info->list);
		kfree(space_info);
	}
	return 0;
}
static void __link_block_group(struct btrfs_space_info *space_info,
			       struct btrfs_block_group_cache *cache)
{
	int index = get_block_group_index(cache);

	down_write(&space_info->groups_sem);
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);
}
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int need_clear = 0;
	u64 cache_gen;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
	if (btrfs_test_opt(root, SPACE_CACHE) &&
	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(root, CLEAR_CACHE))
		need_clear = 1;

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}
		cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
						GFP_NOFS);
		if (!cache->free_space_ctl) {
			kfree(cache);
			ret = -ENOMEM;
			goto error;
		}

		atomic_set(&cache->count, 1);
		spin_lock_init(&cache->lock);
		cache->fs_info = info;
		INIT_LIST_HEAD(&cache->list);
		INIT_LIST_HEAD(&cache->cluster_list);

		if (need_clear) {
			/*
			 * When we mount with old space cache, we need to
			 * set BTRFS_DC_CLEAR and set dirty flag.
			 *
			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
			 *    truncate the old free space cache inode and
			 *    setup a new one.
			 * b) Setting 'dirty flag' makes sure that we flush
			 *    the new space cache info onto disk.
			 */
			cache->disk_cache_state = BTRFS_DC_CLEAR;
			if (btrfs_test_opt(root, SPACE_CACHE))
				cache->dirty = 1;
		}

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(path);
		cache->flags = btrfs_block_group_flags(&cache->item);
		cache->sectorsize = root->sectorsize;

		btrfs_init_free_space_ctl(cache);

		/*
		 * We need to exclude the super stripes now so that the space
		 * info has super bytes accounted for, otherwise we'll think
		 * we have more space than we actually do.
		 */
		exclude_super_stripes(root, cache);

		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us a lot of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			free_excluded_extents(root, cache);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
			free_excluded_extents(root, cache);
		}

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		BUG_ON(ret); /* -ENOMEM */
		cache->space_info = space_info;
		spin_lock(&cache->space_info->lock);
		cache->space_info->bytes_readonly += cache->bytes_super;
		spin_unlock(&cache->space_info->lock);

		__link_block_group(space_info, cache);

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		BUG_ON(ret); /* Logic error */

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_ro(cache, 1);
	}

	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
		if (!(get_alloc_profile(root, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * avoid allocating from un-mirrored block group if there are
		 * mirrored block groups.
		 */
		list_for_each_entry(cache, &space_info->block_groups[3], list)
			set_block_group_ro(cache, 1);
		list_for_each_entry(cache, &space_info->block_groups[4], list)
			set_block_group_ro(cache, 1);
	}

	init_global_block_rsv(info);
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_log_full_commit = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return -ENOMEM;
	}

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->sectorsize = root->sectorsize;
	cache->fs_info = root->fs_info;

	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);

	btrfs_init_free_space_ctl(cache);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	exclude_super_stripes(root, cache);

	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	free_excluded_extents(root, cache);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret); /* -ENOMEM */
	update_global_block_rsv(root->fs_info);

	spin_lock(&cache->space_info->lock);
	cache->space_info->bytes_readonly += cache->bytes_super;
	spin_unlock(&cache->space_info->lock);

	__link_block_group(cache->space_info, cache);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	BUG_ON(ret); /* Logic error */

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	if (ret) {
		btrfs_abort_transaction(trans, extent_root, ret);
		return ret;
	}

	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}
static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
}
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	int ret;
	int index;
	int factor;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	free_excluded_extents(root, block_group);

	memcpy(&key, &block_group->key, sizeof(key));
	index = get_block_group_index(block_group);
	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
				  BTRFS_BLOCK_GROUP_RAID1 |
				  BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	/*
	 * make sure this block group isn't part of the data
	 * allocation cluster
	 */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of the metadata
	 * allocation cluster either
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	inode = lookup_free_space_inode(tree_root, block_group, path);
	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, inode);
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}
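
	/* delete the free space cache's header item from the root tree */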
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index]))
		clear_avail_alloc_bits(root->fs_info, block_group->flags);
	up_write(&block_group->space_info->groups_sem);

	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	block_group->space_info->disk_total -= block_group->key.offset * factor;
	spin_unlock(&block_group->space_info->lock);

	memcpy(&key, &block_group->key, sizeof(key));

	btrfs_clear_space_info_full(root->fs_info);
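
	/* once for the lookup reference taken above, once for the rbtree */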
	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);
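
	/* finally delete the block group item from the extent tree */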
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}
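
/*
 * Create the initial (empty) space_info structures for a filesystem:
 * one for SYSTEM, plus either a combined METADATA|DATA one (when mixed
 * block groups are enabled) or separate METADATA and DATA ones.
 */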
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return 1;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	}
out:
	return ret;
}
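
/*
 * Thin wrappers exposing unpin_extent_range() and btrfs_discard_extent()
 * to the error handling (transaction abort) cleanup paths.
 */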
int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	return unpin_extent_range(root, start, end);
}

int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
			       u64 num_bytes, u64 *actual_bytes)
{
	return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
}
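
/*
 * Walk every block group overlapping [range->start, range->start +
 * range->len) and discard its free space.  On return, range->len holds
 * the number of bytes actually trimmed.
 */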
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	u64 group_trimmed;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret = 0;

	/*
	 * When asked to trim the whole filesystem, the first block group
	 * may start beyond range->start, so look up the first block group
	 * at or after range->start instead of requiring an exact hit.
	 */
	if (range->len == total_bytes)
		cache = btrfs_lookup_first_block_group(fs_info, range->start);
	else
		cache = btrfs_lookup_block_group(fs_info, range->start);

	while (cache) {
		if (cache->key.objectid >= (range->start + range->len)) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->key.objectid);
		end = min(range->start + range->len,
			  cache->key.objectid + cache->key.offset);

		if (end - start >= range->minlen) {
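			/*
			 * Free space is only known once the block group
			 * has been fully cached; finish caching before
			 * trimming.
			 */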
			if (!block_group_cache_done(cache)) {
				ret = cache_block_group(cache, NULL, root, 0);
				if (!ret)
					wait_block_group_cache_done(cache);
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				btrfs_put_block_group(cache);
				break;
			}
		}

		cache = next_block_group(fs_info->tree_root, cache);
	}

	range->len = trimmed;
	return ret;
}