extent-tree.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include "compat.h"
#include "hash.h"
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "ref-cache.h"

#define PENDING_EXTENT_INSERT 0
#define PENDING_EXTENT_DELETE 1
#define PENDING_BACKREF_UPDATE 2

struct pending_extent_op {
	int type;
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 orig_parent;
	u64 generation;
	u64 orig_generation;
	int level;
	struct list_head list;
	int del;
};

static int finish_current_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *extent_root, int all);
static int del_pending_extents(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root, int all);
static int pin_down_bytes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 bytenr, u64 num_bytes, int is_data);
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}
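
/*
 * Editor's note: illustrative example added for this edit, not part of the
 * original file.  block_group_bits() is a strict subset test on the
 * allocation flags, so a block group whose flags are
 * (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DUP) matches a query for
 * BTRFS_BLOCK_GROUP_METADATA alone, but not a query for
 * (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID1).
 */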
/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		atomic_inc(&ret->count);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
/*
 * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
 * since their free space will be released as soon as the transaction commits.
 */
static int add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size;
	int ret;

	mutex_lock(&info->pinned_mutex);
	while (start < end) {
		ret = find_first_extent_bit(&info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		if (extent_start == start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}
	mutex_unlock(&info->pinned_mutex);

	return 0;
}
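
/*
 * Editor's note: illustrative example added for this edit, not part of the
 * original file.  For a block group covering [0, 100) with one pinned
 * extent spanning bytes 40-59, the loop above adds [0, 40) as free space,
 * skips over the pinned bytes, and the trailing check adds [60, 100); the
 * pinned range only becomes free space after the transaction commits and
 * the extent is unpinned.
 */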
static int remove_sb_from_cache(struct btrfs_root *root,
				struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr, 0,
				       &logical, &nr, &stripe_len);
		BUG_ON(ret);
		while (nr--) {
			btrfs_remove_free_space(cache, logical[nr],
						stripe_len);
		}
		kfree(logical);
	}

	return 0;
}
static int cache_block_group(struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group)
{
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int slot;
	u64 last;

	if (!block_group)
		return 0;

	root = root->fs_info->extent_root;

	if (block_group->cached)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	/*
	 * we get into deadlocks with paths held by callers of this function.
	 * since the alloc_mutex is protecting things right now, just
	 * skip the locking here
	 */
	path->skip_locking = 1;
	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
	key.objectid = last;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			if (ret == 0)
				continue;
			else
				break;
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < block_group->key.objectid)
			goto next;

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
			add_new_free_space(block_group, root->fs_info, last,
					   key.objectid);

			last = key.objectid + key.offset;
		}
next:
		path->slots[0]++;
	}

	add_new_free_space(block_group, root->fs_info, last,
			   block_group->key.objectid +
			   block_group->key.offset);

	remove_sb_from_cache(root, block_group);
	block_group->cached = 1;
	ret = 0;
err:
	btrfs_free_path(path);
	return ret;
}
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static inline void put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count))
		kfree(cache);
}
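
/*
 * Editor's note: the helper below is an illustrative sketch added for this
 * edit, not part of the original file.  block_group_cache_tree_search()
 * takes a reference on the group it returns (atomic_inc of cache->count),
 * so every successful btrfs_lookup_block_group() or
 * btrfs_lookup_first_block_group() call must be paired with
 * put_block_group(), as this hypothetical caller does.
 */
static u64 example_bytes_used_at(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;
	u64 used;

	cache = btrfs_lookup_block_group(info, bytenr);
	if (!cache)
		return 0;

	spin_lock(&cache->lock);
	used = btrfs_block_group_used(&cache->item);
	spin_unlock(&cache->lock);

	put_block_group(cache);		/* drop the ref taken by the lookup */
	return used;
}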
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list) {
		if (found->flags == flags)
			return found;
	}
	return NULL;
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
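
/*
 * Editor's note: illustrative example added for this edit, not part of the
 * original file.  div_factor() scales num down to factor/10 of its value,
 * e.g. div_factor(1073741824, 9) == 966367641 (roughly 90% of 1 GiB).
 * btrfs_find_block_group() below uses it to ask whether a block group is
 * less than 90% (factor 9) or 100% (factor 10) allocated.
 */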
u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}
/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure has fields for:
 *
 * - Objectid of the subvolume root
 * - Generation number of the tree holding the reference
 * - objectid of the file holding the reference
 * - number of references held by the parent node (always 1 for tree blocks)
 *
 * A btree leaf may hold multiple references to a file extent.  In most cases,
 * these references are from the same file and the corresponding offsets inside
 * the file are close together.
 *
 * When a file extent is allocated the fields are filled in:
 *     (root_key.objectid, trans->transid, inode objectid, 1)
 *
 * When a leaf is cow'd new references are added for every file extent found
 * in the leaf.  It looks similar to the create case, but trans->transid will
 * be different when the block is cow'd.
 *
 *     (root_key.objectid, trans->transid, inode objectid,
 *      number of references in the leaf)
 *
 * When a file extent is removed either during snapshot deletion or
 * file truncation, we find the corresponding back reference and check
 * the following fields:
 *
 *     (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
 *      inode objectid)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 * - Different generations of the same subvolume
 *
 * When a tree block is created, back references are inserted:
 *
 *     (root->root_key.objectid, trans->transid, level, 1)
 *
 * When a tree block is cow'd, new back references are added for all the
 * blocks it points to.  If the tree block isn't in a reference counted root,
 * the old back references are removed.  These new back references are of
 * the form (trans->transid will have increased since creation):
 *
 *     (root->root_key.objectid, trans->transid, level, 1)
 *
 * When a backref is being deleted, the following fields are checked:
 *
 *     if backref was for a tree root:
 *         (btrfs_header_owner(itself), btrfs_header_generation(itself), level)
 *     else
 *         (btrfs_header_owner(parent), btrfs_header_generation(parent), level)
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent, the key
 * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first
 * byte of the parent extent.  If an extent is a tree root, the key offset
 * is set to the key objectid.
 */
static noinline int lookup_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 ref_root, u64 ref_generation,
					  u64 owner_objectid, int del)
{
	struct btrfs_key key;
	struct btrfs_extent_ref *ref;
	struct extent_buffer *leaf;
	u64 ref_objectid;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;
	key.offset = parent;

	ret = btrfs_search_slot(trans, root, &key, path, del ? -1 : 0, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
	ref_objectid = btrfs_ref_objectid(leaf, ref);
	if (btrfs_ref_root(leaf, ref) != ref_root ||
	    btrfs_ref_generation(leaf, ref) != ref_generation ||
	    (ref_objectid != owner_objectid &&
	     ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
		ret = -EIO;
		WARN_ON(1);
		goto out;
	}
	ret = 0;
out:
	return ret;
}
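
/*
 * Editor's note: the helper below is an illustrative sketch added for this
 * edit, not part of the original file.  It spells out how the back
 * reference key described in the big comment above is composed before the
 * search in lookup_extent_backref(): objectid is the first byte of the
 * extent, the type is BTRFS_EXTENT_REF_KEY, and offset is the first byte
 * of the parent extent (or the extent's own bytenr for a tree root).
 */
static void example_backref_key(struct btrfs_key *key, u64 bytenr, u64 parent)
{
	key->objectid = bytenr;			/* first byte of the extent */
	key->type = BTRFS_EXTENT_REF_KEY;	/* back reference item */
	key->offset = parent;			/* first byte of the parent */
}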
/*
 * updates all the backrefs that are pending on update_list for the
 * extent_root
 */
static noinline int update_backrefs(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_path *path,
				    struct list_head *update_list)
{
	struct btrfs_key key;
	struct btrfs_extent_ref *ref;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct pending_extent_op *op;
	struct extent_buffer *leaf;
	int ret = 0;
	struct list_head *cur = update_list->next;
	u64 ref_objectid;
	u64 ref_root = extent_root->root_key.objectid;

	op = list_entry(cur, struct pending_extent_op, list);

search:
	key.objectid = op->bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;
	key.offset = op->orig_parent;

	ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1);
	BUG_ON(ret);

	leaf = path->nodes[0];

loop:
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);

	ref_objectid = btrfs_ref_objectid(leaf, ref);

	if (btrfs_ref_root(leaf, ref) != ref_root ||
	    btrfs_ref_generation(leaf, ref) != op->orig_generation ||
	    (ref_objectid != op->level &&
	     ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
		printk(KERN_ERR "btrfs couldn't find %llu, parent %llu, "
		       "root %llu, owner %u\n",
		       (unsigned long long)op->bytenr,
		       (unsigned long long)op->orig_parent,
		       (unsigned long long)ref_root, op->level);
		btrfs_print_leaf(extent_root, leaf);
		BUG();
	}

	key.objectid = op->bytenr;
	key.offset = op->parent;
	key.type = BTRFS_EXTENT_REF_KEY;
	ret = btrfs_set_item_key_safe(trans, extent_root, path, &key);
	BUG_ON(ret);
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
	btrfs_set_ref_generation(leaf, ref, op->generation);

	cur = cur->next;

	list_del_init(&op->list);
	unlock_extent(&info->extent_ins, op->bytenr,
		      op->bytenr + op->num_bytes - 1, GFP_NOFS);
	kfree(op);

	if (cur == update_list) {
		btrfs_mark_buffer_dirty(path->nodes[0]);
		btrfs_release_path(extent_root, path);
		goto out;
	}

	op = list_entry(cur, struct pending_extent_op, list);

	path->slots[0]++;
	while (path->slots[0] < btrfs_header_nritems(leaf)) {
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid == op->bytenr &&
		    key.type == BTRFS_EXTENT_REF_KEY)
			goto loop;
		path->slots[0]++;
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(extent_root, path);
	goto search;

out:
	return 0;
}
static noinline int insert_extents(struct btrfs_trans_handle *trans,
				   struct btrfs_root *extent_root,
				   struct btrfs_path *path,
				   struct list_head *insert_list, int nr)
{
	struct btrfs_key *keys;
	u32 *data_size;
	struct pending_extent_op *op;
	struct extent_buffer *leaf;
	struct list_head *cur = insert_list->next;
	struct btrfs_fs_info *info = extent_root->fs_info;
	u64 ref_root = extent_root->root_key.objectid;
	int i = 0, last = 0, ret;
	int total = nr * 2;

	if (!nr)
		return 0;

	keys = kzalloc(total * sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys)
		return -ENOMEM;

	data_size = kzalloc(total * sizeof(u32), GFP_NOFS);
	if (!data_size) {
		kfree(keys);
		return -ENOMEM;
	}

	list_for_each_entry(op, insert_list, list) {
		keys[i].objectid = op->bytenr;
		keys[i].offset = op->num_bytes;
		keys[i].type = BTRFS_EXTENT_ITEM_KEY;
		data_size[i] = sizeof(struct btrfs_extent_item);
		i++;

		keys[i].objectid = op->bytenr;
		keys[i].offset = op->parent;
		keys[i].type = BTRFS_EXTENT_REF_KEY;
		data_size[i] = sizeof(struct btrfs_extent_ref);
		i++;
	}

	op = list_entry(cur, struct pending_extent_op, list);
	i = 0;
	while (i < total) {
		int c;
		ret = btrfs_insert_some_items(trans, extent_root, path,
					      keys+i, data_size+i, total-i);
		BUG_ON(ret < 0);

		if (last && ret > 1)
			BUG();

		leaf = path->nodes[0];
		for (c = 0; c < ret; c++) {
			int ref_first = keys[i].type == BTRFS_EXTENT_REF_KEY;

			/*
			 * if the first item we inserted was a backref, then
			 * the EXTENT_ITEM will be the odd c's, else it will
			 * be the even c's
			 */
			if ((ref_first && (c % 2)) ||
			    (!ref_first && !(c % 2))) {
				struct btrfs_extent_item *itm;

				itm = btrfs_item_ptr(leaf, path->slots[0] + c,
						     struct btrfs_extent_item);
				btrfs_set_extent_refs(path->nodes[0], itm, 1);
				op->del++;
			} else {
				struct btrfs_extent_ref *ref;

				ref = btrfs_item_ptr(leaf, path->slots[0] + c,
						     struct btrfs_extent_ref);
				btrfs_set_ref_root(leaf, ref, ref_root);
				btrfs_set_ref_generation(leaf, ref,
							 op->generation);
				btrfs_set_ref_objectid(leaf, ref, op->level);
				btrfs_set_ref_num_refs(leaf, ref, 1);
				op->del++;
			}

			/*
			 * using del to see when it's ok to free up the
			 * pending_extent_op.  In the case where we insert the
			 * last item on the list in order to help do batching
			 * we need to not free the extent op until we actually
			 * insert the extent_item
			 */
			if (op->del == 2) {
				unlock_extent(&info->extent_ins, op->bytenr,
					      op->bytenr + op->num_bytes - 1,
					      GFP_NOFS);
				cur = cur->next;
				list_del_init(&op->list);
				kfree(op);
				if (cur != insert_list)
					op = list_entry(cur,
						struct pending_extent_op,
						list);
			}
		}
		btrfs_mark_buffer_dirty(leaf);
		btrfs_release_path(extent_root, path);

		/*
		 * Ok backrefs and items usually go right next to each other,
		 * but if we could only insert 1 item that means that we
		 * inserted on the end of a leaf, and we have no idea what may
		 * be on the next leaf so we just play it safe.  In order to
		 * try and help this case we insert the last thing on our
		 * insert list so hopefully it will end up being the last
		 * thing on the leaf and everything else will be before it,
		 * which will let us insert a whole bunch of items at the same
		 * time.
		 */
		if (ret == 1 && !last && (i + ret < total)) {
			/*
			 * last: where we will pick up the next time around
			 * i: our current key to insert, will be total - 1
			 * cur: the current op we are screwing with
			 * op: duh
			 */
			last = i + ret;
			i = total - 1;
			cur = insert_list->prev;
			op = list_entry(cur, struct pending_extent_op, list);
		} else if (last) {
			/*
			 * ok we successfully inserted the last item on the
			 * list, lets reset everything
			 *
			 * i: our current key to insert, so where we left off
			 *    last time
			 * last: done with this
			 * cur: the op we are messing with
			 * op: duh
			 * total: since we inserted the last key, we need to
			 *        decrement total so we don't overflow
			 */
			i = last;
			last = 0;
			total--;
			if (i < total) {
				cur = insert_list->next;
				op = list_entry(cur, struct pending_extent_op,
						list);
			}
		} else {
			i += ret;
		}

		cond_resched();
	}
	ret = 0;
	kfree(keys);
	kfree(data_size);
	return ret;
}
static noinline int insert_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 ref_root, u64 ref_generation,
					  u64 owner_objectid)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *ref;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;
	key.offset = parent;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*ref));
	if (ret == 0) {
		leaf = path->nodes[0];
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_ref);
		btrfs_set_ref_root(leaf, ref, ref_root);
		btrfs_set_ref_generation(leaf, ref, ref_generation);
		btrfs_set_ref_objectid(leaf, ref, owner_objectid);
		btrfs_set_ref_num_refs(leaf, ref, 1);
	} else if (ret == -EEXIST) {
		u64 existing_owner;
		BUG_ON(owner_objectid < BTRFS_FIRST_FREE_OBJECTID);
		leaf = path->nodes[0];
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_ref);
		if (btrfs_ref_root(leaf, ref) != ref_root ||
		    btrfs_ref_generation(leaf, ref) != ref_generation) {
			ret = -EIO;
			WARN_ON(1);
			goto out;
		}

		num_refs = btrfs_ref_num_refs(leaf, ref);
		BUG_ON(num_refs == 0);
		btrfs_set_ref_num_refs(leaf, ref, num_refs + 1);

		existing_owner = btrfs_ref_objectid(leaf, ref);
		if (existing_owner != owner_objectid &&
		    existing_owner != BTRFS_MULTIPLE_OBJECTIDS) {
			btrfs_set_ref_objectid(leaf, ref,
					       BTRFS_MULTIPLE_OBJECTIDS);
		}
		ret = 0;
	} else {
		goto out;
	}
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_release_path(root, path);
	return ret;
}
static noinline int remove_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *ref;
	u32 num_refs;
	int ret = 0;

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
	num_refs = btrfs_ref_num_refs(leaf, ref);
	BUG_ON(num_refs == 0);
	num_refs -= 1;
	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		btrfs_set_ref_num_refs(leaf, ref, num_refs);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_release_path(root, path);
	return ret;
}
#ifdef BIO_RW_DISCARD
static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
}
#endif
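
/*
 * Editor's note: illustrative example added for this edit, not part of the
 * original file.  The >> 9 shifts above convert byte offsets and lengths
 * into the 512-byte sectors blkdev_issue_discard() expects, e.g. a 16 KiB
 * range starting at byte offset 1 MiB becomes sector 2048 with a length of
 * 32 sectors.
 */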
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes)
{
#ifdef BIO_RW_DISCARD
	int ret;
	u64 map_length = num_bytes;
	struct btrfs_multi_bio *multi = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
			      bytenr, &map_length, &multi, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = multi->stripes;
		int i;

		if (map_length > num_bytes)
			map_length = num_bytes;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			btrfs_issue_discard(stripe->dev->bdev,
					    stripe->physical,
					    map_length);
		}
		kfree(multi);
	}

	return ret;
#else
	return 0;
#endif
}
static noinline int free_extents(struct btrfs_trans_handle *trans,
				 struct btrfs_root *extent_root,
				 struct list_head *del_list)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	struct list_head *cur;
	struct pending_extent_op *op;
	struct btrfs_extent_item *ei;
	int ret, num_to_del, extent_slot = 0, found_extent = 0;
	u32 refs;
	u64 bytes_freed = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

search:
	/* search for the backref for the current ref we want to delete */
	cur = del_list->next;
	op = list_entry(cur, struct pending_extent_op, list);
	ret = lookup_extent_backref(trans, extent_root, path, op->bytenr,
				    op->orig_parent,
				    extent_root->root_key.objectid,
				    op->orig_generation, op->level, 1);
	if (ret) {
		printk(KERN_ERR "btrfs unable to find backref byte nr %llu "
		       "root %llu gen %llu owner %u\n",
		       (unsigned long long)op->bytenr,
		       (unsigned long long)extent_root->root_key.objectid,
		       (unsigned long long)op->orig_generation, op->level);
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
		goto out;
	}

	extent_slot = path->slots[0];
	num_to_del = 1;
	found_extent = 0;

	/*
	 * if we aren't the first item on the leaf we can move back one and see
	 * if our ref is right next to our extent item
	 */
	if (likely(extent_slot)) {
		extent_slot--;
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      extent_slot);
		if (found_key.objectid == op->bytenr &&
		    found_key.type == BTRFS_EXTENT_ITEM_KEY &&
		    found_key.offset == op->num_bytes) {
			num_to_del++;
			found_extent = 1;
		}
	}

	/*
	 * if we didn't find the extent we need to delete the backref and then
	 * search for the extent item key so we can update its ref count
	 */
	if (!found_extent) {
		key.objectid = op->bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = op->num_bytes;

		ret = remove_extent_backref(trans, extent_root, path);
		BUG_ON(ret);
		btrfs_release_path(extent_root, path);
		ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
		BUG_ON(ret);
		extent_slot = path->slots[0];
	}

	/* this is where we update the ref count for the extent */
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, extent_slot, struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs == 0);
	refs--;
	btrfs_set_extent_refs(leaf, ei, refs);

	btrfs_mark_buffer_dirty(leaf);

	/*
	 * This extent needs deleting.  The reason cur_slot is extent_slot +
	 * num_to_del is because extent_slot points to the slot where the
	 * extent is, and if the backref was not right next to the extent we
	 * will be deleting at least 1 item, and will want to start searching
	 * at the slot directly next to extent_slot.  However if we did find
	 * the backref next to the extent item then we will be deleting at
	 * least 2 items and will want to start searching directly after the
	 * ref slot
	 */
  897. if (!refs) {
  898. struct list_head *pos, *n, *end;
  899. int cur_slot = extent_slot+num_to_del;
  900. u64 super_used;
  901. u64 root_used;
  902. path->slots[0] = extent_slot;
  903. bytes_freed = op->num_bytes;
  904. mutex_lock(&info->pinned_mutex);
  905. ret = pin_down_bytes(trans, extent_root, op->bytenr,
  906. op->num_bytes, op->level >=
  907. BTRFS_FIRST_FREE_OBJECTID);
  908. mutex_unlock(&info->pinned_mutex);
  909. BUG_ON(ret < 0);
  910. op->del = ret;
  911. /*
  912. * we need to see if we can delete multiple things at once, so
  913. * start looping through the list of extents we are wanting to
  914. * delete and see if their extent/backref's are right next to
  915. * eachother and the extents only have 1 ref
  916. */
  917. for (pos = cur->next; pos != del_list; pos = pos->next) {
  918. struct pending_extent_op *tmp;
  919. tmp = list_entry(pos, struct pending_extent_op, list);
  920. /* we only want to delete extent+ref at this stage */
  921. if (cur_slot >= btrfs_header_nritems(leaf) - 1)
  922. break;
  923. btrfs_item_key_to_cpu(leaf, &found_key, cur_slot);
  924. if (found_key.objectid != tmp->bytenr ||
  925. found_key.type != BTRFS_EXTENT_ITEM_KEY ||
  926. found_key.offset != tmp->num_bytes)
  927. break;
  928. /* check to make sure this extent only has one ref */
  929. ei = btrfs_item_ptr(leaf, cur_slot,
  930. struct btrfs_extent_item);
  931. if (btrfs_extent_refs(leaf, ei) != 1)
  932. break;
  933. btrfs_item_key_to_cpu(leaf, &found_key, cur_slot+1);
  934. if (found_key.objectid != tmp->bytenr ||
  935. found_key.type != BTRFS_EXTENT_REF_KEY ||
  936. found_key.offset != tmp->orig_parent)
  937. break;
  938. /*
  939. * the ref is right next to the extent, we can set the
  940. * ref count to 0 since we will delete them both now
  941. */
  942. btrfs_set_extent_refs(leaf, ei, 0);
  943. /* pin down the bytes for this extent */
  944. mutex_lock(&info->pinned_mutex);
  945. ret = pin_down_bytes(trans, extent_root, tmp->bytenr,
  946. tmp->num_bytes, tmp->level >=
  947. BTRFS_FIRST_FREE_OBJECTID);
  948. mutex_unlock(&info->pinned_mutex);
  949. BUG_ON(ret < 0);
  950. /*
  951. * use the del field to tell if we need to go ahead and
  952. * free up the extent when we delete the item or not.
  953. */
  954. tmp->del = ret;
  955. bytes_freed += tmp->num_bytes;
  956. num_to_del += 2;
  957. cur_slot += 2;
  958. }
  959. end = pos;
  960. /* update the free space counters */
  961. spin_lock(&info->delalloc_lock);
  962. super_used = btrfs_super_bytes_used(&info->super_copy);
  963. btrfs_set_super_bytes_used(&info->super_copy,
  964. super_used - bytes_freed);
  965. root_used = btrfs_root_used(&extent_root->root_item);
  966. btrfs_set_root_used(&extent_root->root_item,
  967. root_used - bytes_freed);
  968. spin_unlock(&info->delalloc_lock);
  969. /* delete the items */
  970. ret = btrfs_del_items(trans, extent_root, path,
  971. path->slots[0], num_to_del);
  972. BUG_ON(ret);
  973. /*
  974. * loop through the extents we deleted and do the cleanup work
  975. * on them
  976. */
  977. for (pos = cur, n = pos->next; pos != end;
  978. pos = n, n = pos->next) {
  979. struct pending_extent_op *tmp;
  980. tmp = list_entry(pos, struct pending_extent_op, list);
  981. /*
982. * remember tmp->del tells us whether or not we pinned
  983. * down the extent
  984. */
  985. ret = update_block_group(trans, extent_root,
  986. tmp->bytenr, tmp->num_bytes, 0,
  987. tmp->del);
  988. BUG_ON(ret);
  989. list_del_init(&tmp->list);
  990. unlock_extent(&info->extent_ins, tmp->bytenr,
  991. tmp->bytenr + tmp->num_bytes - 1,
  992. GFP_NOFS);
  993. kfree(tmp);
  994. }
  995. } else if (refs && found_extent) {
  996. /*
997. * the ref and extent were right next to each other, but the
  998. * extent still has a ref, so just free the backref and keep
  999. * going
  1000. */
  1001. ret = remove_extent_backref(trans, extent_root, path);
  1002. BUG_ON(ret);
  1003. list_del_init(&op->list);
  1004. unlock_extent(&info->extent_ins, op->bytenr,
  1005. op->bytenr + op->num_bytes - 1, GFP_NOFS);
  1006. kfree(op);
  1007. } else {
  1008. /*
  1009. * the extent has multiple refs and the backref we were looking
1010. * for was not right next to it, so just unlock this range and
1011. * move on to the next ref; we're good to go
  1012. */
  1013. list_del_init(&op->list);
  1014. unlock_extent(&info->extent_ins, op->bytenr,
  1015. op->bytenr + op->num_bytes - 1, GFP_NOFS);
  1016. kfree(op);
  1017. }
  1018. btrfs_release_path(extent_root, path);
  1019. if (!list_empty(del_list))
  1020. goto search;
  1021. out:
  1022. btrfs_free_path(path);
  1023. return ret;
  1024. }
  1025. static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
  1026. struct btrfs_root *root, u64 bytenr,
  1027. u64 orig_parent, u64 parent,
  1028. u64 orig_root, u64 ref_root,
  1029. u64 orig_generation, u64 ref_generation,
  1030. u64 owner_objectid)
  1031. {
  1032. int ret;
  1033. struct btrfs_root *extent_root = root->fs_info->extent_root;
  1034. struct btrfs_path *path;
  1035. if (root == root->fs_info->extent_root) {
  1036. struct pending_extent_op *extent_op;
  1037. u64 num_bytes;
  1038. BUG_ON(owner_objectid >= BTRFS_MAX_LEVEL);
  1039. num_bytes = btrfs_level_size(root, (int)owner_objectid);
  1040. mutex_lock(&root->fs_info->extent_ins_mutex);
  1041. if (test_range_bit(&root->fs_info->extent_ins, bytenr,
  1042. bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
  1043. u64 priv;
  1044. ret = get_state_private(&root->fs_info->extent_ins,
  1045. bytenr, &priv);
  1046. BUG_ON(ret);
  1047. extent_op = (struct pending_extent_op *)
  1048. (unsigned long)priv;
  1049. BUG_ON(extent_op->parent != orig_parent);
  1050. BUG_ON(extent_op->generation != orig_generation);
  1051. extent_op->parent = parent;
  1052. extent_op->generation = ref_generation;
  1053. } else {
  1054. extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
  1055. BUG_ON(!extent_op);
  1056. extent_op->type = PENDING_BACKREF_UPDATE;
  1057. extent_op->bytenr = bytenr;
  1058. extent_op->num_bytes = num_bytes;
  1059. extent_op->parent = parent;
  1060. extent_op->orig_parent = orig_parent;
  1061. extent_op->generation = ref_generation;
  1062. extent_op->orig_generation = orig_generation;
  1063. extent_op->level = (int)owner_objectid;
  1064. INIT_LIST_HEAD(&extent_op->list);
  1065. extent_op->del = 0;
  1066. set_extent_bits(&root->fs_info->extent_ins,
  1067. bytenr, bytenr + num_bytes - 1,
  1068. EXTENT_WRITEBACK, GFP_NOFS);
  1069. set_state_private(&root->fs_info->extent_ins,
  1070. bytenr, (unsigned long)extent_op);
  1071. }
  1072. mutex_unlock(&root->fs_info->extent_ins_mutex);
  1073. return 0;
  1074. }
  1075. path = btrfs_alloc_path();
  1076. if (!path)
  1077. return -ENOMEM;
  1078. ret = lookup_extent_backref(trans, extent_root, path,
  1079. bytenr, orig_parent, orig_root,
  1080. orig_generation, owner_objectid, 1);
  1081. if (ret)
  1082. goto out;
  1083. ret = remove_extent_backref(trans, extent_root, path);
  1084. if (ret)
  1085. goto out;
  1086. ret = insert_extent_backref(trans, extent_root, path, bytenr,
  1087. parent, ref_root, ref_generation,
  1088. owner_objectid);
  1089. BUG_ON(ret);
  1090. finish_current_insert(trans, extent_root, 0);
  1091. del_pending_extents(trans, extent_root, 0);
  1092. out:
  1093. btrfs_free_path(path);
  1094. return ret;
  1095. }
  1096. int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
  1097. struct btrfs_root *root, u64 bytenr,
  1098. u64 orig_parent, u64 parent,
  1099. u64 ref_root, u64 ref_generation,
  1100. u64 owner_objectid)
  1101. {
  1102. int ret;
  1103. if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
  1104. owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
  1105. return 0;
  1106. ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_parent,
  1107. parent, ref_root, ref_root,
  1108. ref_generation, ref_generation,
  1109. owner_objectid);
  1110. return ret;
  1111. }
  1112. static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
  1113. struct btrfs_root *root, u64 bytenr,
  1114. u64 orig_parent, u64 parent,
  1115. u64 orig_root, u64 ref_root,
  1116. u64 orig_generation, u64 ref_generation,
  1117. u64 owner_objectid)
  1118. {
  1119. struct btrfs_path *path;
  1120. int ret;
  1121. struct btrfs_key key;
  1122. struct extent_buffer *l;
  1123. struct btrfs_extent_item *item;
  1124. u32 refs;
  1125. path = btrfs_alloc_path();
  1126. if (!path)
  1127. return -ENOMEM;
  1128. path->reada = 1;
  1129. key.objectid = bytenr;
  1130. key.type = BTRFS_EXTENT_ITEM_KEY;
  1131. key.offset = (u64)-1;
  1132. ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
  1133. 0, 1);
  1134. if (ret < 0)
  1135. return ret;
  1136. BUG_ON(ret == 0 || path->slots[0] == 0);
  1137. path->slots[0]--;
  1138. l = path->nodes[0];
  1139. btrfs_item_key_to_cpu(l, &key, path->slots[0]);
  1140. if (key.objectid != bytenr) {
  1141. btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]);
  1142. printk(KERN_ERR "btrfs wanted %llu found %llu\n",
  1143. (unsigned long long)bytenr,
  1144. (unsigned long long)key.objectid);
  1145. BUG();
  1146. }
  1147. BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);
  1148. item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
  1149. refs = btrfs_extent_refs(l, item);
  1150. btrfs_set_extent_refs(l, item, refs + 1);
  1151. btrfs_mark_buffer_dirty(path->nodes[0]);
  1152. btrfs_release_path(root->fs_info->extent_root, path);
  1153. path->reada = 1;
  1154. ret = insert_extent_backref(trans, root->fs_info->extent_root,
  1155. path, bytenr, parent,
  1156. ref_root, ref_generation,
  1157. owner_objectid);
  1158. BUG_ON(ret);
  1159. finish_current_insert(trans, root->fs_info->extent_root, 0);
  1160. del_pending_extents(trans, root->fs_info->extent_root, 0);
  1161. btrfs_free_path(path);
  1162. return 0;
  1163. }
  1164. int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
  1165. struct btrfs_root *root,
  1166. u64 bytenr, u64 num_bytes, u64 parent,
  1167. u64 ref_root, u64 ref_generation,
  1168. u64 owner_objectid)
  1169. {
  1170. int ret;
  1171. if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
  1172. owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
  1173. return 0;
  1174. ret = __btrfs_inc_extent_ref(trans, root, bytenr, 0, parent,
  1175. 0, ref_root, 0, ref_generation,
  1176. owner_objectid);
  1177. return ret;
  1178. }
  1179. int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
  1180. struct btrfs_root *root)
  1181. {
  1182. u64 start;
  1183. u64 end;
  1184. int ret;
1185. while (1) {
  1186. finish_current_insert(trans, root->fs_info->extent_root, 1);
  1187. del_pending_extents(trans, root->fs_info->extent_root, 1);
  1188. /* is there more work to do? */
  1189. ret = find_first_extent_bit(&root->fs_info->pending_del,
  1190. 0, &start, &end, EXTENT_WRITEBACK);
  1191. if (!ret)
  1192. continue;
  1193. ret = find_first_extent_bit(&root->fs_info->extent_ins,
  1194. 0, &start, &end, EXTENT_WRITEBACK);
  1195. if (!ret)
  1196. continue;
  1197. break;
  1198. }
  1199. return 0;
  1200. }
  1201. int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
  1202. struct btrfs_root *root, u64 bytenr,
  1203. u64 num_bytes, u32 *refs)
  1204. {
  1205. struct btrfs_path *path;
  1206. int ret;
  1207. struct btrfs_key key;
  1208. struct extent_buffer *l;
  1209. struct btrfs_extent_item *item;
  1210. WARN_ON(num_bytes < root->sectorsize);
  1211. path = btrfs_alloc_path();
  1212. path->reada = 1;
  1213. key.objectid = bytenr;
  1214. key.offset = num_bytes;
  1215. btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
  1216. ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
  1217. 0, 0);
  1218. if (ret < 0)
  1219. goto out;
  1220. if (ret != 0) {
  1221. btrfs_print_leaf(root, path->nodes[0]);
  1222. printk(KERN_INFO "btrfs failed to find block number %llu\n",
  1223. (unsigned long long)bytenr);
  1224. BUG();
  1225. }
  1226. l = path->nodes[0];
  1227. item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
  1228. *refs = btrfs_extent_refs(l, item);
  1229. out:
  1230. btrfs_free_path(path);
  1231. return 0;
  1232. }
  1233. int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
  1234. struct btrfs_root *root, u64 objectid, u64 bytenr)
  1235. {
  1236. struct btrfs_root *extent_root = root->fs_info->extent_root;
  1237. struct btrfs_path *path;
  1238. struct extent_buffer *leaf;
  1239. struct btrfs_extent_ref *ref_item;
  1240. struct btrfs_key key;
  1241. struct btrfs_key found_key;
  1242. u64 ref_root;
  1243. u64 last_snapshot;
  1244. u32 nritems;
  1245. int ret;
  1246. key.objectid = bytenr;
  1247. key.offset = (u64)-1;
  1248. key.type = BTRFS_EXTENT_ITEM_KEY;
  1249. path = btrfs_alloc_path();
  1250. ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
  1251. if (ret < 0)
  1252. goto out;
  1253. BUG_ON(ret == 0);
  1254. ret = -ENOENT;
  1255. if (path->slots[0] == 0)
  1256. goto out;
  1257. path->slots[0]--;
  1258. leaf = path->nodes[0];
  1259. btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
  1260. if (found_key.objectid != bytenr ||
  1261. found_key.type != BTRFS_EXTENT_ITEM_KEY)
  1262. goto out;
  1263. last_snapshot = btrfs_root_last_snapshot(&root->root_item);
  1264. while (1) {
  1265. leaf = path->nodes[0];
  1266. nritems = btrfs_header_nritems(leaf);
  1267. if (path->slots[0] >= nritems) {
  1268. ret = btrfs_next_leaf(extent_root, path);
  1269. if (ret < 0)
  1270. goto out;
  1271. if (ret == 0)
  1272. continue;
  1273. break;
  1274. }
  1275. btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
  1276. if (found_key.objectid != bytenr)
  1277. break;
  1278. if (found_key.type != BTRFS_EXTENT_REF_KEY) {
  1279. path->slots[0]++;
  1280. continue;
  1281. }
  1282. ref_item = btrfs_item_ptr(leaf, path->slots[0],
  1283. struct btrfs_extent_ref);
  1284. ref_root = btrfs_ref_root(leaf, ref_item);
  1285. if ((ref_root != root->root_key.objectid &&
  1286. ref_root != BTRFS_TREE_LOG_OBJECTID) ||
  1287. objectid != btrfs_ref_objectid(leaf, ref_item)) {
  1288. ret = 1;
  1289. goto out;
  1290. }
  1291. if (btrfs_ref_generation(leaf, ref_item) <= last_snapshot) {
  1292. ret = 1;
  1293. goto out;
  1294. }
  1295. path->slots[0]++;
  1296. }
  1297. ret = 0;
  1298. out:
  1299. btrfs_free_path(path);
  1300. return ret;
  1301. }
  1302. int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
  1303. struct extent_buffer *buf, u32 nr_extents)
  1304. {
  1305. struct btrfs_key key;
  1306. struct btrfs_file_extent_item *fi;
  1307. u64 root_gen;
  1308. u32 nritems;
  1309. int i;
  1310. int level;
  1311. int ret = 0;
  1312. int shared = 0;
  1313. if (!root->ref_cows)
  1314. return 0;
  1315. if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
  1316. shared = 0;
  1317. root_gen = root->root_key.offset;
  1318. } else {
  1319. shared = 1;
  1320. root_gen = trans->transid - 1;
  1321. }
  1322. level = btrfs_header_level(buf);
  1323. nritems = btrfs_header_nritems(buf);
  1324. if (level == 0) {
  1325. struct btrfs_leaf_ref *ref;
  1326. struct btrfs_extent_info *info;
  1327. ref = btrfs_alloc_leaf_ref(root, nr_extents);
  1328. if (!ref) {
  1329. ret = -ENOMEM;
  1330. goto out;
  1331. }
  1332. ref->root_gen = root_gen;
  1333. ref->bytenr = buf->start;
  1334. ref->owner = btrfs_header_owner(buf);
  1335. ref->generation = btrfs_header_generation(buf);
  1336. ref->nritems = nr_extents;
  1337. info = ref->extents;
  1338. for (i = 0; nr_extents > 0 && i < nritems; i++) {
  1339. u64 disk_bytenr;
  1340. btrfs_item_key_to_cpu(buf, &key, i);
  1341. if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
  1342. continue;
  1343. fi = btrfs_item_ptr(buf, i,
  1344. struct btrfs_file_extent_item);
  1345. if (btrfs_file_extent_type(buf, fi) ==
  1346. BTRFS_FILE_EXTENT_INLINE)
  1347. continue;
  1348. disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
  1349. if (disk_bytenr == 0)
  1350. continue;
  1351. info->bytenr = disk_bytenr;
  1352. info->num_bytes =
  1353. btrfs_file_extent_disk_num_bytes(buf, fi);
  1354. info->objectid = key.objectid;
  1355. info->offset = key.offset;
  1356. info++;
  1357. }
  1358. ret = btrfs_add_leaf_ref(root, ref, shared);
  1359. if (ret == -EEXIST && shared) {
  1360. struct btrfs_leaf_ref *old;
  1361. old = btrfs_lookup_leaf_ref(root, ref->bytenr);
  1362. BUG_ON(!old);
  1363. btrfs_remove_leaf_ref(root, old);
  1364. btrfs_free_leaf_ref(root, old);
  1365. ret = btrfs_add_leaf_ref(root, ref, shared);
  1366. }
  1367. WARN_ON(ret);
  1368. btrfs_free_leaf_ref(root, ref);
  1369. }
  1370. out:
  1371. return ret;
  1372. }
  1373. /* when a block goes through cow, we update the reference counts of
  1374. * everything that block points to. The internal pointers of the block
  1375. * can be in just about any order, and it is likely to have clusters of
  1376. * things that are close together and clusters of things that are not.
  1377. *
  1378. * To help reduce the seeks that come with updating all of these reference
  1379. * counts, sort them by byte number before actual updates are done.
  1380. *
  1381. * struct refsort is used to match byte number to slot in the btree block.
  1382. * we sort based on the byte number and then use the slot to actually
  1383. * find the item.
  1384. *
1385. * struct refsort is smaller than struct btrfs_item and smaller than
  1386. * struct btrfs_key_ptr. Since we're currently limited to the page size
  1387. * for a btree block, there's no way for a kmalloc of refsorts for a
  1388. * single node to be bigger than a page.
  1389. */
  1390. struct refsort {
  1391. u64 bytenr;
  1392. u32 slot;
  1393. };
  1394. /*
  1395. * for passing into sort()
  1396. */
  1397. static int refsort_cmp(const void *a_void, const void *b_void)
  1398. {
  1399. const struct refsort *a = a_void;
  1400. const struct refsort *b = b_void;
  1401. if (a->bytenr < b->bytenr)
  1402. return -1;
  1403. if (a->bytenr > b->bytenr)
  1404. return 1;
  1405. return 0;
  1406. }
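/*
 * A minimal sketch of how the comparator above drives the kernel sort()
 * call used in btrfs_inc_ref() below.  refsort_example is a hypothetical
 * helper with made-up byte numbers, compiled out on purpose.
 */
#if 0
static void refsort_example(void)
{
	struct refsort ex[3] = {
		{ .bytenr = 12288, .slot = 0 },
		{ .bytenr = 4096,  .slot = 1 },
		{ .bytenr = 8192,  .slot = 2 },
	};

	/* after sorting, ex[] is ordered 4096, 8192, 12288 by bytenr */
	sort(ex, ARRAY_SIZE(ex), sizeof(struct refsort), refsort_cmp, NULL);

	/* ex[i].slot still records which btree slot each bytenr came from */
}
#endif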
  1407. noinline int btrfs_inc_ref(struct btrfs_trans_handle *trans,
  1408. struct btrfs_root *root,
  1409. struct extent_buffer *orig_buf,
  1410. struct extent_buffer *buf, u32 *nr_extents)
  1411. {
  1412. u64 bytenr;
  1413. u64 ref_root;
  1414. u64 orig_root;
  1415. u64 ref_generation;
  1416. u64 orig_generation;
  1417. struct refsort *sorted;
  1418. u32 nritems;
  1419. u32 nr_file_extents = 0;
  1420. struct btrfs_key key;
  1421. struct btrfs_file_extent_item *fi;
  1422. int i;
  1423. int level;
  1424. int ret = 0;
  1425. int faili = 0;
  1426. int refi = 0;
  1427. int slot;
  1428. int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
  1429. u64, u64, u64, u64, u64, u64, u64, u64);
  1430. ref_root = btrfs_header_owner(buf);
  1431. ref_generation = btrfs_header_generation(buf);
  1432. orig_root = btrfs_header_owner(orig_buf);
  1433. orig_generation = btrfs_header_generation(orig_buf);
  1434. nritems = btrfs_header_nritems(buf);
  1435. level = btrfs_header_level(buf);
  1436. sorted = kmalloc(sizeof(struct refsort) * nritems, GFP_NOFS);
  1437. BUG_ON(!sorted);
  1438. if (root->ref_cows) {
  1439. process_func = __btrfs_inc_extent_ref;
  1440. } else {
  1441. if (level == 0 &&
  1442. root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
  1443. goto out;
  1444. if (level != 0 &&
  1445. root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
  1446. goto out;
  1447. process_func = __btrfs_update_extent_ref;
  1448. }
  1449. /*
  1450. * we make two passes through the items. In the first pass we
  1451. * only record the byte number and slot. Then we sort based on
  1452. * byte number and do the actual work based on the sorted results
  1453. */
  1454. for (i = 0; i < nritems; i++) {
  1455. cond_resched();
  1456. if (level == 0) {
  1457. btrfs_item_key_to_cpu(buf, &key, i);
  1458. if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
  1459. continue;
  1460. fi = btrfs_item_ptr(buf, i,
  1461. struct btrfs_file_extent_item);
  1462. if (btrfs_file_extent_type(buf, fi) ==
  1463. BTRFS_FILE_EXTENT_INLINE)
  1464. continue;
  1465. bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
  1466. if (bytenr == 0)
  1467. continue;
  1468. nr_file_extents++;
  1469. sorted[refi].bytenr = bytenr;
  1470. sorted[refi].slot = i;
  1471. refi++;
  1472. } else {
  1473. bytenr = btrfs_node_blockptr(buf, i);
  1474. sorted[refi].bytenr = bytenr;
  1475. sorted[refi].slot = i;
  1476. refi++;
  1477. }
  1478. }
  1479. /*
  1480. * if refi == 0, we didn't actually put anything into the sorted
  1481. * array and we're done
  1482. */
  1483. if (refi == 0)
  1484. goto out;
  1485. sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
  1486. for (i = 0; i < refi; i++) {
  1487. cond_resched();
  1488. slot = sorted[i].slot;
  1489. bytenr = sorted[i].bytenr;
  1490. if (level == 0) {
  1491. btrfs_item_key_to_cpu(buf, &key, slot);
  1492. ret = process_func(trans, root, bytenr,
  1493. orig_buf->start, buf->start,
  1494. orig_root, ref_root,
  1495. orig_generation, ref_generation,
  1496. key.objectid);
  1497. if (ret) {
  1498. faili = slot;
  1499. WARN_ON(1);
  1500. goto fail;
  1501. }
  1502. } else {
  1503. ret = process_func(trans, root, bytenr,
  1504. orig_buf->start, buf->start,
  1505. orig_root, ref_root,
  1506. orig_generation, ref_generation,
  1507. level - 1);
  1508. if (ret) {
  1509. faili = slot;
  1510. WARN_ON(1);
  1511. goto fail;
  1512. }
  1513. }
  1514. }
  1515. out:
  1516. kfree(sorted);
  1517. if (nr_extents) {
  1518. if (level == 0)
  1519. *nr_extents = nr_file_extents;
  1520. else
  1521. *nr_extents = nritems;
  1522. }
  1523. return 0;
  1524. fail:
  1525. kfree(sorted);
  1526. WARN_ON(1);
  1527. return ret;
  1528. }
  1529. int btrfs_update_ref(struct btrfs_trans_handle *trans,
  1530. struct btrfs_root *root, struct extent_buffer *orig_buf,
  1531. struct extent_buffer *buf, int start_slot, int nr)
  1532. {
  1533. u64 bytenr;
  1534. u64 ref_root;
  1535. u64 orig_root;
  1536. u64 ref_generation;
  1537. u64 orig_generation;
  1538. struct btrfs_key key;
  1539. struct btrfs_file_extent_item *fi;
  1540. int i;
  1541. int ret;
  1542. int slot;
  1543. int level;
  1544. BUG_ON(start_slot < 0);
  1545. BUG_ON(start_slot + nr > btrfs_header_nritems(buf));
  1546. ref_root = btrfs_header_owner(buf);
  1547. ref_generation = btrfs_header_generation(buf);
  1548. orig_root = btrfs_header_owner(orig_buf);
  1549. orig_generation = btrfs_header_generation(orig_buf);
  1550. level = btrfs_header_level(buf);
  1551. if (!root->ref_cows) {
  1552. if (level == 0 &&
  1553. root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
  1554. return 0;
  1555. if (level != 0 &&
  1556. root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
  1557. return 0;
  1558. }
  1559. for (i = 0, slot = start_slot; i < nr; i++, slot++) {
  1560. cond_resched();
  1561. if (level == 0) {
  1562. btrfs_item_key_to_cpu(buf, &key, slot);
  1563. if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
  1564. continue;
  1565. fi = btrfs_item_ptr(buf, slot,
  1566. struct btrfs_file_extent_item);
  1567. if (btrfs_file_extent_type(buf, fi) ==
  1568. BTRFS_FILE_EXTENT_INLINE)
  1569. continue;
  1570. bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
  1571. if (bytenr == 0)
  1572. continue;
  1573. ret = __btrfs_update_extent_ref(trans, root, bytenr,
  1574. orig_buf->start, buf->start,
  1575. orig_root, ref_root,
  1576. orig_generation, ref_generation,
  1577. key.objectid);
  1578. if (ret)
  1579. goto fail;
  1580. } else {
  1581. bytenr = btrfs_node_blockptr(buf, slot);
  1582. ret = __btrfs_update_extent_ref(trans, root, bytenr,
  1583. orig_buf->start, buf->start,
  1584. orig_root, ref_root,
  1585. orig_generation, ref_generation,
  1586. level - 1);
  1587. if (ret)
  1588. goto fail;
  1589. }
  1590. }
  1591. return 0;
  1592. fail:
  1593. WARN_ON(1);
  1594. return -1;
  1595. }
  1596. static int write_one_cache_group(struct btrfs_trans_handle *trans,
  1597. struct btrfs_root *root,
  1598. struct btrfs_path *path,
  1599. struct btrfs_block_group_cache *cache)
  1600. {
  1601. int ret;
  1602. int pending_ret;
  1603. struct btrfs_root *extent_root = root->fs_info->extent_root;
  1604. unsigned long bi;
  1605. struct extent_buffer *leaf;
  1606. ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
  1607. if (ret < 0)
  1608. goto fail;
  1609. BUG_ON(ret);
  1610. leaf = path->nodes[0];
  1611. bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
  1612. write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
  1613. btrfs_mark_buffer_dirty(leaf);
  1614. btrfs_release_path(extent_root, path);
  1615. fail:
  1616. finish_current_insert(trans, extent_root, 0);
  1617. pending_ret = del_pending_extents(trans, extent_root, 0);
  1618. if (ret)
  1619. return ret;
  1620. if (pending_ret)
  1621. return pending_ret;
  1622. return 0;
  1623. }
  1624. int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
  1625. struct btrfs_root *root)
  1626. {
  1627. struct btrfs_block_group_cache *cache, *entry;
  1628. struct rb_node *n;
  1629. int err = 0;
  1630. int werr = 0;
  1631. struct btrfs_path *path;
  1632. u64 last = 0;
  1633. path = btrfs_alloc_path();
  1634. if (!path)
  1635. return -ENOMEM;
  1636. while (1) {
  1637. cache = NULL;
  1638. spin_lock(&root->fs_info->block_group_cache_lock);
  1639. for (n = rb_first(&root->fs_info->block_group_cache_tree);
  1640. n; n = rb_next(n)) {
  1641. entry = rb_entry(n, struct btrfs_block_group_cache,
  1642. cache_node);
  1643. if (entry->dirty) {
  1644. cache = entry;
  1645. break;
  1646. }
  1647. }
  1648. spin_unlock(&root->fs_info->block_group_cache_lock);
  1649. if (!cache)
  1650. break;
  1651. cache->dirty = 0;
  1652. last += cache->key.offset;
  1653. err = write_one_cache_group(trans, root,
  1654. path, cache);
  1655. /*
  1656. * if we fail to write the cache group, we want
  1657. * to keep it marked dirty in hopes that a later
  1658. * write will work
  1659. */
  1660. if (err) {
  1661. werr = err;
  1662. continue;
  1663. }
  1664. }
  1665. btrfs_free_path(path);
  1666. return werr;
  1667. }
  1668. int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
  1669. {
  1670. struct btrfs_block_group_cache *block_group;
  1671. int readonly = 0;
  1672. block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
  1673. if (!block_group || block_group->ro)
  1674. readonly = 1;
  1675. if (block_group)
  1676. put_block_group(block_group);
  1677. return readonly;
  1678. }
  1679. static int update_space_info(struct btrfs_fs_info *info, u64 flags,
  1680. u64 total_bytes, u64 bytes_used,
  1681. struct btrfs_space_info **space_info)
  1682. {
  1683. struct btrfs_space_info *found;
  1684. found = __find_space_info(info, flags);
  1685. if (found) {
  1686. spin_lock(&found->lock);
  1687. found->total_bytes += total_bytes;
  1688. found->bytes_used += bytes_used;
  1689. found->full = 0;
  1690. spin_unlock(&found->lock);
  1691. *space_info = found;
  1692. return 0;
  1693. }
  1694. found = kzalloc(sizeof(*found), GFP_NOFS);
  1695. if (!found)
  1696. return -ENOMEM;
  1697. list_add(&found->list, &info->space_info);
  1698. INIT_LIST_HEAD(&found->block_groups);
  1699. init_rwsem(&found->groups_sem);
  1700. spin_lock_init(&found->lock);
  1701. found->flags = flags;
  1702. found->total_bytes = total_bytes;
  1703. found->bytes_used = bytes_used;
  1704. found->bytes_pinned = 0;
  1705. found->bytes_reserved = 0;
  1706. found->bytes_readonly = 0;
  1707. found->bytes_delalloc = 0;
  1708. found->full = 0;
  1709. found->force_alloc = 0;
  1710. *space_info = found;
  1711. return 0;
  1712. }
  1713. static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
  1714. {
  1715. u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
  1716. BTRFS_BLOCK_GROUP_RAID1 |
  1717. BTRFS_BLOCK_GROUP_RAID10 |
  1718. BTRFS_BLOCK_GROUP_DUP);
  1719. if (extra_flags) {
  1720. if (flags & BTRFS_BLOCK_GROUP_DATA)
  1721. fs_info->avail_data_alloc_bits |= extra_flags;
  1722. if (flags & BTRFS_BLOCK_GROUP_METADATA)
  1723. fs_info->avail_metadata_alloc_bits |= extra_flags;
  1724. if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
  1725. fs_info->avail_system_alloc_bits |= extra_flags;
  1726. }
  1727. }
  1728. static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
  1729. {
  1730. spin_lock(&cache->space_info->lock);
  1731. spin_lock(&cache->lock);
  1732. if (!cache->ro) {
  1733. cache->space_info->bytes_readonly += cache->key.offset -
  1734. btrfs_block_group_used(&cache->item);
  1735. cache->ro = 1;
  1736. }
  1737. spin_unlock(&cache->lock);
  1738. spin_unlock(&cache->space_info->lock);
  1739. }
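/*
 * Example of the accounting above (hypothetical numbers): marking a 1GB
 * block group read-only while 300MB of it is in use adds the remaining
 * 700MB (key.offset - block_group_used) to the space_info's
 * bytes_readonly, so the allocator stops counting that slack as free.
 */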
  1740. u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
  1741. {
  1742. u64 num_devices = root->fs_info->fs_devices->rw_devices;
  1743. if (num_devices == 1)
  1744. flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
  1745. if (num_devices < 4)
  1746. flags &= ~BTRFS_BLOCK_GROUP_RAID10;
  1747. if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
  1748. (flags & (BTRFS_BLOCK_GROUP_RAID1 |
  1749. BTRFS_BLOCK_GROUP_RAID10))) {
  1750. flags &= ~BTRFS_BLOCK_GROUP_DUP;
  1751. }
  1752. if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
  1753. (flags & BTRFS_BLOCK_GROUP_RAID10)) {
  1754. flags &= ~BTRFS_BLOCK_GROUP_RAID1;
  1755. }
  1756. if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
  1757. ((flags & BTRFS_BLOCK_GROUP_RAID1) |
  1758. (flags & BTRFS_BLOCK_GROUP_RAID10) |
  1759. (flags & BTRFS_BLOCK_GROUP_DUP)))
  1760. flags &= ~BTRFS_BLOCK_GROUP_RAID0;
  1761. return flags;
  1762. }
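/*
 * Example of the reduction above (hypothetical filesystem): with a single
 * rw device, a request for METADATA | RAID1 | RAID0 is trimmed down to
 * plain METADATA, since neither mirroring nor striping is possible on one
 * device.  With two devices RAID10 is dropped but RAID1 survives, and DUP
 * is dropped whenever RAID1 or RAID10 remains set.
 */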
  1763. static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
  1764. {
  1765. struct btrfs_fs_info *info = root->fs_info;
  1766. u64 alloc_profile;
  1767. if (data) {
  1768. alloc_profile = info->avail_data_alloc_bits &
  1769. info->data_alloc_profile;
  1770. data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
  1771. } else if (root == root->fs_info->chunk_root) {
  1772. alloc_profile = info->avail_system_alloc_bits &
  1773. info->system_alloc_profile;
  1774. data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
  1775. } else {
  1776. alloc_profile = info->avail_metadata_alloc_bits &
  1777. info->metadata_alloc_profile;
  1778. data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
  1779. }
  1780. return btrfs_reduce_alloc_profile(root, data);
  1781. }
  1782. void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
  1783. {
  1784. u64 alloc_target;
  1785. alloc_target = btrfs_get_alloc_profile(root, 1);
  1786. BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
  1787. alloc_target);
  1788. }
  1789. /*
  1790. * for now this just makes sure we have at least 5% of our metadata space free
  1791. * for use.
  1792. */
  1793. int btrfs_check_metadata_free_space(struct btrfs_root *root)
  1794. {
  1795. struct btrfs_fs_info *info = root->fs_info;
  1796. struct btrfs_space_info *meta_sinfo;
  1797. u64 alloc_target, thresh;
  1798. /* get the space info for where the metadata will live */
  1799. alloc_target = btrfs_get_alloc_profile(root, 0);
  1800. meta_sinfo = __find_space_info(info, alloc_target);
  1801. /*
  1802. * if the metadata area isn't maxed out then there is no sense in
  1803. * checking how much is used, since we can always allocate a new chunk
  1804. */
  1805. if (!meta_sinfo->full)
  1806. return 0;
  1807. spin_lock(&meta_sinfo->lock);
  1808. thresh = meta_sinfo->total_bytes * 95;
  1809. do_div(thresh, 100);
  1810. if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
  1811. meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly > thresh) {
  1812. spin_unlock(&meta_sinfo->lock);
  1813. return -ENOSPC;
  1814. }
  1815. spin_unlock(&meta_sinfo->lock);
  1816. return 0;
  1817. }
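/*
 * Worked example for the check above (hypothetical numbers): with
 * total_bytes = 10GB of metadata space, thresh = 10GB * 95 / 100 = 9.5GB.
 * If used + reserved + pinned + readonly already exceeds 9.5GB the caller
 * gets -ENOSPC, even though a sliver of space technically remains.
 */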
  1818. /*
  1819. * This will check the space that the inode allocates from to make sure we have
  1820. * enough space for bytes.
  1821. */
  1822. int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
  1823. u64 bytes)
  1824. {
  1825. struct btrfs_space_info *data_sinfo;
  1826. int ret = 0;
  1827. /* make sure bytes are sectorsize aligned */
  1828. bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
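/*
 * e.g. with a 4K sectorsize, a 5000 byte request rounds up to 8192:
 * (5000 + 4095) & ~4095 == 8192 (hypothetical numbers)
 */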
  1829. data_sinfo = BTRFS_I(inode)->space_info;
  1830. again:
  1831. /* make sure we have enough space to handle the data first */
  1832. spin_lock(&data_sinfo->lock);
  1833. if (data_sinfo->total_bytes - data_sinfo->bytes_used -
  1834. data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
  1835. data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
  1836. data_sinfo->bytes_may_use < bytes) {
  1837. /*
  1838. * if we don't have enough free bytes in this space then we need
  1839. * to alloc a new chunk.
  1840. */
  1841. if (!data_sinfo->full) {
  1842. u64 alloc_target;
  1843. struct btrfs_trans_handle *trans;
  1844. data_sinfo->force_alloc = 1;
  1845. spin_unlock(&data_sinfo->lock);
  1846. alloc_target = btrfs_get_alloc_profile(root, 1);
  1847. trans = btrfs_start_transaction(root, 1);
  1848. if (!trans)
  1849. return -ENOMEM;
  1850. ret = do_chunk_alloc(trans, root->fs_info->extent_root,
  1851. bytes + 2 * 1024 * 1024,
  1852. alloc_target, 0);
  1853. btrfs_end_transaction(trans, root);
  1854. if (ret)
  1855. return ret;
  1856. goto again;
  1857. }
  1858. spin_unlock(&data_sinfo->lock);
  1859. printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
  1860. ", %llu bytes_used, %llu bytes_reserved, "
1861. "%llu bytes_pinned, %llu bytes_readonly, %llu may use, "
  1862. "%llu total\n", bytes, data_sinfo->bytes_delalloc,
  1863. data_sinfo->bytes_used, data_sinfo->bytes_reserved,
  1864. data_sinfo->bytes_pinned, data_sinfo->bytes_readonly,
  1865. data_sinfo->bytes_may_use, data_sinfo->total_bytes);
  1866. return -ENOSPC;
  1867. }
  1868. data_sinfo->bytes_may_use += bytes;
  1869. BTRFS_I(inode)->reserved_bytes += bytes;
  1870. spin_unlock(&data_sinfo->lock);
  1871. return btrfs_check_metadata_free_space(root);
  1872. }
  1873. /*
  1874. * if there was an error for whatever reason after calling
1875. * btrfs_check_data_free_space, call this so we can clean up the counters.
  1876. */
  1877. void btrfs_free_reserved_data_space(struct btrfs_root *root,
  1878. struct inode *inode, u64 bytes)
  1879. {
  1880. struct btrfs_space_info *data_sinfo;
  1881. /* make sure bytes are sectorsize aligned */
  1882. bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
  1883. data_sinfo = BTRFS_I(inode)->space_info;
  1884. spin_lock(&data_sinfo->lock);
  1885. data_sinfo->bytes_may_use -= bytes;
  1886. BTRFS_I(inode)->reserved_bytes -= bytes;
  1887. spin_unlock(&data_sinfo->lock);
  1888. }
  1889. /* called when we are adding a delalloc extent to the inode's io_tree */
  1890. void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
  1891. u64 bytes)
  1892. {
  1893. struct btrfs_space_info *data_sinfo;
  1894. /* get the space info for where this inode will be storing its data */
  1895. data_sinfo = BTRFS_I(inode)->space_info;
  1896. /* make sure we have enough space to handle the data first */
  1897. spin_lock(&data_sinfo->lock);
  1898. data_sinfo->bytes_delalloc += bytes;
  1899. /*
  1900. * we are adding a delalloc extent without calling
  1901. * btrfs_check_data_free_space first. This happens on a weird
  1902. * writepage condition, but shouldn't hurt our accounting
  1903. */
  1904. if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
  1905. data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
  1906. BTRFS_I(inode)->reserved_bytes = 0;
  1907. } else {
  1908. data_sinfo->bytes_may_use -= bytes;
  1909. BTRFS_I(inode)->reserved_bytes -= bytes;
  1910. }
  1911. spin_unlock(&data_sinfo->lock);
  1912. }
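/*
 * Example of the accounting above (hypothetical numbers): if the inode had
 * reserved 8K via btrfs_check_data_free_space and a 12K delalloc extent is
 * added, only the 8K that was actually reserved is moved out of
 * bytes_may_use and reserved_bytes drops to 0; on the normal path the full
 * byte count simply shifts from bytes_may_use to bytes_delalloc.
 */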
1913. /* called when we are clearing a delalloc extent from the inode's io_tree */
  1914. void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
  1915. u64 bytes)
  1916. {
  1917. struct btrfs_space_info *info;
  1918. info = BTRFS_I(inode)->space_info;
  1919. spin_lock(&info->lock);
  1920. info->bytes_delalloc -= bytes;
  1921. spin_unlock(&info->lock);
  1922. }
  1923. static int do_chunk_alloc(struct btrfs_trans_handle *trans,
  1924. struct btrfs_root *extent_root, u64 alloc_bytes,
  1925. u64 flags, int force)
  1926. {
  1927. struct btrfs_space_info *space_info;
  1928. u64 thresh;
  1929. int ret = 0;
  1930. mutex_lock(&extent_root->fs_info->chunk_mutex);
  1931. flags = btrfs_reduce_alloc_profile(extent_root, flags);
  1932. space_info = __find_space_info(extent_root->fs_info, flags);
  1933. if (!space_info) {
  1934. ret = update_space_info(extent_root->fs_info, flags,
  1935. 0, 0, &space_info);
  1936. BUG_ON(ret);
  1937. }
  1938. BUG_ON(!space_info);
  1939. spin_lock(&space_info->lock);
  1940. if (space_info->force_alloc) {
  1941. force = 1;
  1942. space_info->force_alloc = 0;
  1943. }
  1944. if (space_info->full) {
  1945. spin_unlock(&space_info->lock);
  1946. goto out;
  1947. }
  1948. thresh = space_info->total_bytes - space_info->bytes_readonly;
  1949. thresh = div_factor(thresh, 6);
  1950. if (!force &&
  1951. (space_info->bytes_used + space_info->bytes_pinned +
  1952. space_info->bytes_reserved + alloc_bytes) < thresh) {
  1953. spin_unlock(&space_info->lock);
  1954. goto out;
  1955. }
  1956. spin_unlock(&space_info->lock);
  1957. ret = btrfs_alloc_chunk(trans, extent_root, flags);
  1958. if (ret)
  1959. space_info->full = 1;
  1960. out:
  1961. mutex_unlock(&extent_root->fs_info->chunk_mutex);
  1962. return ret;
  1963. }
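/*
 * Rough example of the threshold above (hypothetical numbers, and assuming
 * div_factor(x, 6) works out to 60% of x): with 10GB of writable space in
 * this profile, a non-forced allocation only creates a new chunk once
 * used + pinned + reserved + the requested bytes crosses the 6GB mark;
 * forced callers (and force_alloc) skip the check entirely.
 */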
  1964. static int update_block_group(struct btrfs_trans_handle *trans,
  1965. struct btrfs_root *root,
  1966. u64 bytenr, u64 num_bytes, int alloc,
  1967. int mark_free)
  1968. {
  1969. struct btrfs_block_group_cache *cache;
  1970. struct btrfs_fs_info *info = root->fs_info;
  1971. u64 total = num_bytes;
  1972. u64 old_val;
  1973. u64 byte_in_group;
  1974. while (total) {
  1975. cache = btrfs_lookup_block_group(info, bytenr);
  1976. if (!cache)
  1977. return -1;
  1978. byte_in_group = bytenr - cache->key.objectid;
  1979. WARN_ON(byte_in_group > cache->key.offset);
  1980. spin_lock(&cache->space_info->lock);
  1981. spin_lock(&cache->lock);
  1982. cache->dirty = 1;
  1983. old_val = btrfs_block_group_used(&cache->item);
  1984. num_bytes = min(total, cache->key.offset - byte_in_group);
  1985. if (alloc) {
  1986. old_val += num_bytes;
  1987. cache->space_info->bytes_used += num_bytes;
  1988. if (cache->ro)
  1989. cache->space_info->bytes_readonly -= num_bytes;
  1990. btrfs_set_block_group_used(&cache->item, old_val);
  1991. spin_unlock(&cache->lock);
  1992. spin_unlock(&cache->space_info->lock);
  1993. } else {
  1994. old_val -= num_bytes;
  1995. cache->space_info->bytes_used -= num_bytes;
  1996. if (cache->ro)
  1997. cache->space_info->bytes_readonly += num_bytes;
  1998. btrfs_set_block_group_used(&cache->item, old_val);
  1999. spin_unlock(&cache->lock);
  2000. spin_unlock(&cache->space_info->lock);
  2001. if (mark_free) {
  2002. int ret;
  2003. ret = btrfs_discard_extent(root, bytenr,
  2004. num_bytes);
  2005. WARN_ON(ret);
  2006. ret = btrfs_add_free_space(cache, bytenr,
  2007. num_bytes);
  2008. WARN_ON(ret);
  2009. }
  2010. }
  2011. put_block_group(cache);
  2012. total -= num_bytes;
  2013. bytenr += num_bytes;
  2014. }
  2015. return 0;
  2016. }
  2017. static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
  2018. {
  2019. struct btrfs_block_group_cache *cache;
  2020. u64 bytenr;
  2021. cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
  2022. if (!cache)
  2023. return 0;
  2024. bytenr = cache->key.objectid;
  2025. put_block_group(cache);
  2026. return bytenr;
  2027. }
  2028. int btrfs_update_pinned_extents(struct btrfs_root *root,
  2029. u64 bytenr, u64 num, int pin)
  2030. {
  2031. u64 len;
  2032. struct btrfs_block_group_cache *cache;
  2033. struct btrfs_fs_info *fs_info = root->fs_info;
  2034. WARN_ON(!mutex_is_locked(&root->fs_info->pinned_mutex));
  2035. if (pin) {
  2036. set_extent_dirty(&fs_info->pinned_extents,
  2037. bytenr, bytenr + num - 1, GFP_NOFS);
  2038. } else {
  2039. clear_extent_dirty(&fs_info->pinned_extents,
  2040. bytenr, bytenr + num - 1, GFP_NOFS);
  2041. }
  2042. while (num > 0) {
  2043. cache = btrfs_lookup_block_group(fs_info, bytenr);
  2044. BUG_ON(!cache);
  2045. len = min(num, cache->key.offset -
  2046. (bytenr - cache->key.objectid));
  2047. if (pin) {
  2048. spin_lock(&cache->space_info->lock);
  2049. spin_lock(&cache->lock);
  2050. cache->pinned += len;
  2051. cache->space_info->bytes_pinned += len;
  2052. spin_unlock(&cache->lock);
  2053. spin_unlock(&cache->space_info->lock);
  2054. fs_info->total_pinned += len;
  2055. } else {
  2056. spin_lock(&cache->space_info->lock);
  2057. spin_lock(&cache->lock);
  2058. cache->pinned -= len;
  2059. cache->space_info->bytes_pinned -= len;
  2060. spin_unlock(&cache->lock);
  2061. spin_unlock(&cache->space_info->lock);
  2062. fs_info->total_pinned -= len;
  2063. if (cache->cached)
  2064. btrfs_add_free_space(cache, bytenr, len);
  2065. }
  2066. put_block_group(cache);
  2067. bytenr += len;
  2068. num -= len;
  2069. }
  2070. return 0;
  2071. }
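/*
 * Example of the splitting above (hypothetical layout): pinning 8K that
 * starts 4K before the end of a block group is handled in two rounds,
 * first len = 4K against the group that is ending, then len = 4K against
 * the group that begins at the next bytenr, so the per-group pinned
 * counters stay accurate across the boundary.
 */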
  2072. static int update_reserved_extents(struct btrfs_root *root,
  2073. u64 bytenr, u64 num, int reserve)
  2074. {
  2075. u64 len;
  2076. struct btrfs_block_group_cache *cache;
  2077. struct btrfs_fs_info *fs_info = root->fs_info;
  2078. while (num > 0) {
  2079. cache = btrfs_lookup_block_group(fs_info, bytenr);
  2080. BUG_ON(!cache);
  2081. len = min(num, cache->key.offset -
  2082. (bytenr - cache->key.objectid));
  2083. spin_lock(&cache->space_info->lock);
  2084. spin_lock(&cache->lock);
  2085. if (reserve) {
  2086. cache->reserved += len;
  2087. cache->space_info->bytes_reserved += len;
  2088. } else {
  2089. cache->reserved -= len;
  2090. cache->space_info->bytes_reserved -= len;
  2091. }
  2092. spin_unlock(&cache->lock);
  2093. spin_unlock(&cache->space_info->lock);
  2094. put_block_group(cache);
  2095. bytenr += len;
  2096. num -= len;
  2097. }
  2098. return 0;
  2099. }
  2100. int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
  2101. {
  2102. u64 last = 0;
  2103. u64 start;
  2104. u64 end;
  2105. struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
  2106. int ret;
  2107. mutex_lock(&root->fs_info->pinned_mutex);
  2108. while (1) {
  2109. ret = find_first_extent_bit(pinned_extents, last,
  2110. &start, &end, EXTENT_DIRTY);
  2111. if (ret)
  2112. break;
  2113. set_extent_dirty(copy, start, end, GFP_NOFS);
  2114. last = end + 1;
  2115. }
  2116. mutex_unlock(&root->fs_info->pinned_mutex);
  2117. return 0;
  2118. }
  2119. int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
  2120. struct btrfs_root *root,
  2121. struct extent_io_tree *unpin)
  2122. {
  2123. u64 start;
  2124. u64 end;
  2125. int ret;
  2126. mutex_lock(&root->fs_info->pinned_mutex);
  2127. while (1) {
  2128. ret = find_first_extent_bit(unpin, 0, &start, &end,
  2129. EXTENT_DIRTY);
  2130. if (ret)
  2131. break;
  2132. ret = btrfs_discard_extent(root, start, end + 1 - start);
  2133. btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
  2134. clear_extent_dirty(unpin, start, end, GFP_NOFS);
  2135. if (need_resched()) {
  2136. mutex_unlock(&root->fs_info->pinned_mutex);
  2137. cond_resched();
  2138. mutex_lock(&root->fs_info->pinned_mutex);
  2139. }
  2140. }
  2141. mutex_unlock(&root->fs_info->pinned_mutex);
  2142. return ret;
  2143. }
  2144. static int finish_current_insert(struct btrfs_trans_handle *trans,
  2145. struct btrfs_root *extent_root, int all)
  2146. {
  2147. u64 start;
  2148. u64 end;
  2149. u64 priv;
  2150. u64 search = 0;
  2151. struct btrfs_fs_info *info = extent_root->fs_info;
  2152. struct btrfs_path *path;
  2153. struct pending_extent_op *extent_op, *tmp;
  2154. struct list_head insert_list, update_list;
  2155. int ret;
  2156. int num_inserts = 0, max_inserts, restart = 0;
  2157. path = btrfs_alloc_path();
  2158. INIT_LIST_HEAD(&insert_list);
  2159. INIT_LIST_HEAD(&update_list);
  2160. max_inserts = extent_root->leafsize /
  2161. (2 * sizeof(struct btrfs_key) + 2 * sizeof(struct btrfs_item) +
  2162. sizeof(struct btrfs_extent_ref) +
  2163. sizeof(struct btrfs_extent_item));
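/*
 * In other words, max_inserts is sized so one batch of new extent items
 * plus their backrefs (two keys, two item headers, one extent item and one
 * ref each) should fit within roughly a single leaf; the exact count
 * depends on leafsize and the on-disk struct sizes.
 */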
  2164. again:
  2165. mutex_lock(&info->extent_ins_mutex);
  2166. while (1) {
  2167. ret = find_first_extent_bit(&info->extent_ins, search, &start,
  2168. &end, EXTENT_WRITEBACK);
  2169. if (ret) {
  2170. if (restart && !num_inserts &&
  2171. list_empty(&update_list)) {
  2172. restart = 0;
  2173. search = 0;
  2174. continue;
  2175. }
  2176. break;
  2177. }
  2178. ret = try_lock_extent(&info->extent_ins, start, end, GFP_NOFS);
  2179. if (!ret) {
  2180. if (all)
  2181. restart = 1;
  2182. search = end + 1;
  2183. if (need_resched()) {
  2184. mutex_unlock(&info->extent_ins_mutex);
  2185. cond_resched();
  2186. mutex_lock(&info->extent_ins_mutex);
  2187. }
  2188. continue;
  2189. }
  2190. ret = get_state_private(&info->extent_ins, start, &priv);
  2191. BUG_ON(ret);
  2192. extent_op = (struct pending_extent_op *)(unsigned long) priv;
  2193. if (extent_op->type == PENDING_EXTENT_INSERT) {
  2194. num_inserts++;
  2195. list_add_tail(&extent_op->list, &insert_list);
  2196. search = end + 1;
  2197. if (num_inserts == max_inserts) {
  2198. restart = 1;
  2199. break;
  2200. }
  2201. } else if (extent_op->type == PENDING_BACKREF_UPDATE) {
  2202. list_add_tail(&extent_op->list, &update_list);
  2203. search = end + 1;
  2204. } else {
  2205. BUG();
  2206. }
  2207. }
  2208. /*
  2209. * process the update list, clear the writeback bit for it, and if
  2210. * somebody marked this thing for deletion then just unlock it and be
  2211. * done, the free_extents will handle it
  2212. */
  2213. list_for_each_entry_safe(extent_op, tmp, &update_list, list) {
  2214. clear_extent_bits(&info->extent_ins, extent_op->bytenr,
  2215. extent_op->bytenr + extent_op->num_bytes - 1,
  2216. EXTENT_WRITEBACK, GFP_NOFS);
  2217. if (extent_op->del) {
  2218. list_del_init(&extent_op->list);
  2219. unlock_extent(&info->extent_ins, extent_op->bytenr,
  2220. extent_op->bytenr + extent_op->num_bytes
  2221. - 1, GFP_NOFS);
  2222. kfree(extent_op);
  2223. }
  2224. }
  2225. mutex_unlock(&info->extent_ins_mutex);
  2226. /*
2227. * still have things left on the update list, go ahead and update
  2228. * everything
  2229. */
  2230. if (!list_empty(&update_list)) {
  2231. ret = update_backrefs(trans, extent_root, path, &update_list);
  2232. BUG_ON(ret);
2233. /* we may have COW'ed new blocks, so let's start over */
  2234. if (all)
  2235. restart = 1;
  2236. }
  2237. /*
  2238. * if no inserts need to be done, but we skipped some extents and we
2239. * need to make sure everything is cleaned up, then reset everything and
  2240. * go back to the beginning
  2241. */
  2242. if (!num_inserts && restart) {
  2243. search = 0;
  2244. restart = 0;
  2245. INIT_LIST_HEAD(&update_list);
  2246. INIT_LIST_HEAD(&insert_list);
  2247. goto again;
  2248. } else if (!num_inserts) {
  2249. goto out;
  2250. }
  2251. /*
  2252. * process the insert extents list. Again if we are deleting this
  2253. * extent, then just unlock it, pin down the bytes if need be, and be
  2254. * done with it. Saves us from having to actually insert the extent
  2255. * into the tree and then subsequently come along and delete it
  2256. */
  2257. mutex_lock(&info->extent_ins_mutex);
  2258. list_for_each_entry_safe(extent_op, tmp, &insert_list, list) {
  2259. clear_extent_bits(&info->extent_ins, extent_op->bytenr,
  2260. extent_op->bytenr + extent_op->num_bytes - 1,
  2261. EXTENT_WRITEBACK, GFP_NOFS);
  2262. if (extent_op->del) {
  2263. u64 used;
  2264. list_del_init(&extent_op->list);
  2265. unlock_extent(&info->extent_ins, extent_op->bytenr,
  2266. extent_op->bytenr + extent_op->num_bytes
  2267. - 1, GFP_NOFS);
  2268. mutex_lock(&extent_root->fs_info->pinned_mutex);
  2269. ret = pin_down_bytes(trans, extent_root,
  2270. extent_op->bytenr,
  2271. extent_op->num_bytes, 0);
  2272. mutex_unlock(&extent_root->fs_info->pinned_mutex);
  2273. spin_lock(&info->delalloc_lock);
  2274. used = btrfs_super_bytes_used(&info->super_copy);
  2275. btrfs_set_super_bytes_used(&info->super_copy,
  2276. used - extent_op->num_bytes);
  2277. used = btrfs_root_used(&extent_root->root_item);
  2278. btrfs_set_root_used(&extent_root->root_item,
  2279. used - extent_op->num_bytes);
  2280. spin_unlock(&info->delalloc_lock);
  2281. ret = update_block_group(trans, extent_root,
  2282. extent_op->bytenr,
  2283. extent_op->num_bytes,
  2284. 0, ret > 0);
  2285. BUG_ON(ret);
  2286. kfree(extent_op);
  2287. num_inserts--;
  2288. }
  2289. }
  2290. mutex_unlock(&info->extent_ins_mutex);
  2291. ret = insert_extents(trans, extent_root, path, &insert_list,
  2292. num_inserts);
  2293. BUG_ON(ret);
  2294. /*
  2295. * if restart is set for whatever reason we need to go back and start
  2296. * searching through the pending list again.
  2297. *
  2298. * We just inserted some extents, which could have resulted in new
  2299. * blocks being allocated, which would result in new blocks needing
  2300. * updates, so if all is set we _must_ restart to get the updated
  2301. * blocks.
  2302. */
  2303. if (restart || all) {
  2304. INIT_LIST_HEAD(&insert_list);
  2305. INIT_LIST_HEAD(&update_list);
  2306. search = 0;
  2307. restart = 0;
  2308. num_inserts = 0;
  2309. goto again;
  2310. }
  2311. out:
  2312. btrfs_free_path(path);
  2313. return 0;
  2314. }
  2315. static int pin_down_bytes(struct btrfs_trans_handle *trans,
  2316. struct btrfs_root *root,
  2317. u64 bytenr, u64 num_bytes, int is_data)
  2318. {
  2319. int err = 0;
  2320. struct extent_buffer *buf;
  2321. if (is_data)
  2322. goto pinit;
  2323. buf = btrfs_find_tree_block(root, bytenr, num_bytes);
  2324. if (!buf)
  2325. goto pinit;
  2326. /* we can reuse a block if it hasn't been written
  2327. * and it is from this transaction. We can't
  2328. * reuse anything from the tree log root because
  2329. * it has tiny sub-transactions.
  2330. */
  2331. if (btrfs_buffer_uptodate(buf, 0) &&
  2332. btrfs_try_tree_lock(buf)) {
  2333. u64 header_owner = btrfs_header_owner(buf);
  2334. u64 header_transid = btrfs_header_generation(buf);
  2335. if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
  2336. header_owner != BTRFS_TREE_RELOC_OBJECTID &&
  2337. header_transid == trans->transid &&
  2338. !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
  2339. clean_tree_block(NULL, root, buf);
  2340. btrfs_tree_unlock(buf);
  2341. free_extent_buffer(buf);
  2342. return 1;
  2343. }
  2344. btrfs_tree_unlock(buf);
  2345. }
  2346. free_extent_buffer(buf);
  2347. pinit:
  2348. btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
  2349. BUG_ON(err < 0);
  2350. return 0;
  2351. }
  2352. /*
  2353. * remove an extent from the root, returns 0 on success
  2354. */
  2355. static int __free_extent(struct btrfs_trans_handle *trans,
  2356. struct btrfs_root *root,
  2357. u64 bytenr, u64 num_bytes, u64 parent,
  2358. u64 root_objectid, u64 ref_generation,
  2359. u64 owner_objectid, int pin, int mark_free)
  2360. {
  2361. struct btrfs_path *path;
  2362. struct btrfs_key key;
  2363. struct btrfs_fs_info *info = root->fs_info;
  2364. struct btrfs_root *extent_root = info->extent_root;
  2365. struct extent_buffer *leaf;
  2366. int ret;
  2367. int extent_slot = 0;
  2368. int found_extent = 0;
  2369. int num_to_del = 1;
  2370. struct btrfs_extent_item *ei;
  2371. u32 refs;
  2372. key.objectid = bytenr;
  2373. btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
  2374. key.offset = num_bytes;
  2375. path = btrfs_alloc_path();
  2376. if (!path)
  2377. return -ENOMEM;
  2378. path->reada = 1;
  2379. ret = lookup_extent_backref(trans, extent_root, path,
  2380. bytenr, parent, root_objectid,
  2381. ref_generation, owner_objectid, 1);
  2382. if (ret == 0) {
  2383. struct btrfs_key found_key;
  2384. extent_slot = path->slots[0];
  2385. while (extent_slot > 0) {
  2386. extent_slot--;
  2387. btrfs_item_key_to_cpu(path->nodes[0], &found_key,
  2388. extent_slot);
  2389. if (found_key.objectid != bytenr)
  2390. break;
  2391. if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
  2392. found_key.offset == num_bytes) {
  2393. found_extent = 1;
  2394. break;
  2395. }
  2396. if (path->slots[0] - extent_slot > 5)
  2397. break;
  2398. }
  2399. if (!found_extent) {
  2400. ret = remove_extent_backref(trans, extent_root, path);
  2401. BUG_ON(ret);
  2402. btrfs_release_path(extent_root, path);
  2403. ret = btrfs_search_slot(trans, extent_root,
  2404. &key, path, -1, 1);
  2405. if (ret) {
  2406. printk(KERN_ERR "umm, got %d back from search"
  2407. ", was looking for %llu\n", ret,
  2408. (unsigned long long)bytenr);
  2409. btrfs_print_leaf(extent_root, path->nodes[0]);
  2410. }
  2411. BUG_ON(ret);
  2412. extent_slot = path->slots[0];
  2413. }
  2414. } else {
  2415. btrfs_print_leaf(extent_root, path->nodes[0]);
  2416. WARN_ON(1);
  2417. printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
  2418. "root %llu gen %llu owner %llu\n",
  2419. (unsigned long long)bytenr,
  2420. (unsigned long long)root_objectid,
  2421. (unsigned long long)ref_generation,
  2422. (unsigned long long)owner_objectid);
  2423. }
  2424. leaf = path->nodes[0];
  2425. ei = btrfs_item_ptr(leaf, extent_slot,
  2426. struct btrfs_extent_item);
  2427. refs = btrfs_extent_refs(leaf, ei);
  2428. BUG_ON(refs == 0);
  2429. refs -= 1;
  2430. btrfs_set_extent_refs(leaf, ei, refs);
  2431. btrfs_mark_buffer_dirty(leaf);
  2432. if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
  2433. struct btrfs_extent_ref *ref;
  2434. ref = btrfs_item_ptr(leaf, path->slots[0],
  2435. struct btrfs_extent_ref);
  2436. BUG_ON(btrfs_ref_num_refs(leaf, ref) != 1);
  2437. /* if the back ref and the extent are next to each other
  2438. * they get deleted below in one shot
  2439. */
  2440. path->slots[0] = extent_slot;
  2441. num_to_del = 2;
  2442. } else if (found_extent) {
  2443. /* otherwise delete the extent back ref */
  2444. ret = remove_extent_backref(trans, extent_root, path);
  2445. BUG_ON(ret);
  2446. /* if refs are 0, we need to setup the path for deletion */
  2447. if (refs == 0) {
  2448. btrfs_release_path(extent_root, path);
  2449. ret = btrfs_search_slot(trans, extent_root, &key, path,
  2450. -1, 1);
  2451. BUG_ON(ret);
  2452. }
  2453. }
  2454. if (refs == 0) {
  2455. u64 super_used;
  2456. u64 root_used;
  2457. if (pin) {
  2458. mutex_lock(&root->fs_info->pinned_mutex);
  2459. ret = pin_down_bytes(trans, root, bytenr, num_bytes,
  2460. owner_objectid >= BTRFS_FIRST_FREE_OBJECTID);
  2461. mutex_unlock(&root->fs_info->pinned_mutex);
  2462. if (ret > 0)
  2463. mark_free = 1;
  2464. BUG_ON(ret < 0);
  2465. }
  2466. /* block accounting for super block */
  2467. spin_lock(&info->delalloc_lock);
  2468. super_used = btrfs_super_bytes_used(&info->super_copy);
  2469. btrfs_set_super_bytes_used(&info->super_copy,
  2470. super_used - num_bytes);
  2471. /* block accounting for root item */
  2472. root_used = btrfs_root_used(&root->root_item);
  2473. btrfs_set_root_used(&root->root_item,
  2474. root_used - num_bytes);
  2475. spin_unlock(&info->delalloc_lock);
  2476. ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
  2477. num_to_del);
  2478. BUG_ON(ret);
  2479. btrfs_release_path(extent_root, path);
  2480. if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
  2481. ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
  2482. BUG_ON(ret);
  2483. }
  2484. ret = update_block_group(trans, root, bytenr, num_bytes, 0,
  2485. mark_free);
  2486. BUG_ON(ret);
  2487. }
  2488. btrfs_free_path(path);
  2489. finish_current_insert(trans, extent_root, 0);
  2490. return ret;
  2491. }
  2492. /*
  2493. * find all the blocks marked as pending in the radix tree and remove
  2494. * them from the extent map
  2495. */
  2496. static int del_pending_extents(struct btrfs_trans_handle *trans,
  2497. struct btrfs_root *extent_root, int all)
  2498. {
  2499. int ret;
  2500. int err = 0;
  2501. u64 start;
  2502. u64 end;
  2503. u64 priv;
  2504. u64 search = 0;
  2505. int nr = 0, skipped = 0;
  2506. struct extent_io_tree *pending_del;
  2507. struct extent_io_tree *extent_ins;
  2508. struct pending_extent_op *extent_op;
  2509. struct btrfs_fs_info *info = extent_root->fs_info;
  2510. struct list_head delete_list;
  2511. INIT_LIST_HEAD(&delete_list);
  2512. extent_ins = &extent_root->fs_info->extent_ins;
  2513. pending_del = &extent_root->fs_info->pending_del;
  2514. again:
  2515. mutex_lock(&info->extent_ins_mutex);
  2516. while (1) {
  2517. ret = find_first_extent_bit(pending_del, search, &start, &end,
  2518. EXTENT_WRITEBACK);
  2519. if (ret) {
  2520. if (all && skipped && !nr) {
  2521. search = 0;
  2522. skipped = 0;
  2523. continue;
  2524. }
  2525. mutex_unlock(&info->extent_ins_mutex);
  2526. break;
  2527. }
  2528. ret = try_lock_extent(extent_ins, start, end, GFP_NOFS);
  2529. if (!ret) {
  2530. search = end+1;
  2531. skipped = 1;
  2532. if (need_resched()) {
  2533. mutex_unlock(&info->extent_ins_mutex);
  2534. cond_resched();
  2535. mutex_lock(&info->extent_ins_mutex);
  2536. }
  2537. continue;
  2538. }
  2539. BUG_ON(ret < 0);
  2540. ret = get_state_private(pending_del, start, &priv);
  2541. BUG_ON(ret);
  2542. extent_op = (struct pending_extent_op *)(unsigned long)priv;
  2543. clear_extent_bits(pending_del, start, end, EXTENT_WRITEBACK,
  2544. GFP_NOFS);
  2545. if (!test_range_bit(extent_ins, start, end,
  2546. EXTENT_WRITEBACK, 0)) {
  2547. list_add_tail(&extent_op->list, &delete_list);
  2548. nr++;
  2549. } else {
  2550. kfree(extent_op);
  2551. ret = get_state_private(&info->extent_ins, start,
  2552. &priv);
  2553. BUG_ON(ret);
  2554. extent_op = (struct pending_extent_op *)
  2555. (unsigned long)priv;
  2556. clear_extent_bits(&info->extent_ins, start, end,
  2557. EXTENT_WRITEBACK, GFP_NOFS);
  2558. if (extent_op->type == PENDING_BACKREF_UPDATE) {
  2559. list_add_tail(&extent_op->list, &delete_list);
  2560. search = end + 1;
  2561. nr++;
  2562. continue;
  2563. }
  2564. mutex_lock(&extent_root->fs_info->pinned_mutex);
  2565. ret = pin_down_bytes(trans, extent_root, start,
  2566. end + 1 - start, 0);
  2567. mutex_unlock(&extent_root->fs_info->pinned_mutex);
  2568. ret = update_block_group(trans, extent_root, start,
  2569. end + 1 - start, 0, ret > 0);
  2570. unlock_extent(extent_ins, start, end, GFP_NOFS);
  2571. BUG_ON(ret);
  2572. kfree(extent_op);
  2573. }
  2574. if (ret)
  2575. err = ret;
  2576. search = end + 1;
  2577. if (need_resched()) {
  2578. mutex_unlock(&info->extent_ins_mutex);
  2579. cond_resched();
  2580. mutex_lock(&info->extent_ins_mutex);
  2581. }
  2582. }
  2583. if (nr) {
  2584. ret = free_extents(trans, extent_root, &delete_list);
  2585. BUG_ON(ret);
  2586. }
  2587. if (all && skipped) {
  2588. INIT_LIST_HEAD(&delete_list);
  2589. search = 0;
  2590. nr = 0;
  2591. goto again;
  2592. }
  2593. if (!err)
  2594. finish_current_insert(trans, extent_root, 0);
  2595. return err;
  2596. }
  2597. /*
  2598. * remove an extent from the root, returns 0 on success
  2599. */
  2600. static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
  2601. struct btrfs_root *root,
  2602. u64 bytenr, u64 num_bytes, u64 parent,
  2603. u64 root_objectid, u64 ref_generation,
  2604. u64 owner_objectid, int pin)
  2605. {
  2606. struct btrfs_root *extent_root = root->fs_info->extent_root;
  2607. int pending_ret;
  2608. int ret;
  2609. WARN_ON(num_bytes < root->sectorsize);
  2610. if (root == extent_root) {
  2611. struct pending_extent_op *extent_op = NULL;
  2612. mutex_lock(&root->fs_info->extent_ins_mutex);
  2613. if (test_range_bit(&root->fs_info->extent_ins, bytenr,
  2614. bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
  2615. u64 priv;
  2616. ret = get_state_private(&root->fs_info->extent_ins,
  2617. bytenr, &priv);
  2618. BUG_ON(ret);
  2619. extent_op = (struct pending_extent_op *)
  2620. (unsigned long)priv;
  2621. extent_op->del = 1;
  2622. if (extent_op->type == PENDING_EXTENT_INSERT) {
  2623. mutex_unlock(&root->fs_info->extent_ins_mutex);
  2624. return 0;
  2625. }
  2626. }
  2627. if (extent_op) {
  2628. ref_generation = extent_op->orig_generation;
  2629. parent = extent_op->orig_parent;
  2630. }
  2631. extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
  2632. BUG_ON(!extent_op);
  2633. extent_op->type = PENDING_EXTENT_DELETE;
  2634. extent_op->bytenr = bytenr;
  2635. extent_op->num_bytes = num_bytes;
  2636. extent_op->parent = parent;
  2637. extent_op->orig_parent = parent;
  2638. extent_op->generation = ref_generation;
  2639. extent_op->orig_generation = ref_generation;
  2640. extent_op->level = (int)owner_objectid;
  2641. INIT_LIST_HEAD(&extent_op->list);
  2642. extent_op->del = 0;
  2643. set_extent_bits(&root->fs_info->pending_del,
  2644. bytenr, bytenr + num_bytes - 1,
  2645. EXTENT_WRITEBACK, GFP_NOFS);
  2646. set_state_private(&root->fs_info->pending_del,
  2647. bytenr, (unsigned long)extent_op);
  2648. mutex_unlock(&root->fs_info->extent_ins_mutex);
  2649. return 0;
  2650. }
2651. /* if metadata, always pin */
  2652. if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
  2653. if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
  2654. mutex_lock(&root->fs_info->pinned_mutex);
  2655. btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
  2656. mutex_unlock(&root->fs_info->pinned_mutex);
  2657. update_reserved_extents(root, bytenr, num_bytes, 0);
  2658. return 0;
  2659. }
  2660. pin = 1;
  2661. }
2662. /* if data, pin when any transaction has committed this */
  2663. if (ref_generation != trans->transid)
  2664. pin = 1;
  2665. ret = __free_extent(trans, root, bytenr, num_bytes, parent,
  2666. root_objectid, ref_generation,
  2667. owner_objectid, pin, pin == 0);
  2668. finish_current_insert(trans, root->fs_info->extent_root, 0);
  2669. pending_ret = del_pending_extents(trans, root->fs_info->extent_root, 0);
  2670. return ret ? ret : pending_ret;
  2671. }
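/*
 * external entry point for freeing an extent; a thin wrapper around
 * __btrfs_free_extent()
 */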
  2672. int btrfs_free_extent(struct btrfs_trans_handle *trans,
  2673. struct btrfs_root *root,
  2674. u64 bytenr, u64 num_bytes, u64 parent,
  2675. u64 root_objectid, u64 ref_generation,
  2676. u64 owner_objectid, int pin)
  2677. {
  2678. int ret;
  2679. ret = __btrfs_free_extent(trans, root, bytenr, num_bytes, parent,
  2680. root_objectid, ref_generation,
  2681. owner_objectid, pin);
  2682. return ret;
  2683. }
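/* round val up to the next stripesize boundary */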
  2684. static u64 stripe_align(struct btrfs_root *root, u64 val)
  2685. {
  2686. u64 mask = ((u64)root->stripesize - 1);
  2687. u64 ret = (val + mask) & ~mask;
  2688. return ret;
  2689. }
  2690. /*
2691. * walks the btree of allocated extents and finds a hole of a given size.
  2692. * The key ins is changed to record the hole:
  2693. * ins->objectid == block start
  2694. * ins->flags = BTRFS_EXTENT_ITEM_KEY
  2695. * ins->offset == number of blocks
  2696. * Any available blocks before search_start are skipped.
  2697. */
  2698. static noinline int find_free_extent(struct btrfs_trans_handle *trans,
  2699. struct btrfs_root *orig_root,
  2700. u64 num_bytes, u64 empty_size,
  2701. u64 search_start, u64 search_end,
  2702. u64 hint_byte, struct btrfs_key *ins,
  2703. u64 exclude_start, u64 exclude_nr,
  2704. int data)
  2705. {
  2706. int ret = 0;
  2707. struct btrfs_root *root = orig_root->fs_info->extent_root;
  2708. u64 total_needed = num_bytes;
  2709. u64 *last_ptr = NULL;
  2710. u64 last_wanted = 0;
  2711. struct btrfs_block_group_cache *block_group = NULL;
  2712. int chunk_alloc_done = 0;
  2713. int empty_cluster = 2 * 1024 * 1024;
  2714. int allowed_chunk_alloc = 0;
  2715. struct list_head *head = NULL, *cur = NULL;
  2716. int loop = 0;
  2717. int extra_loop = 0;
  2718. struct btrfs_space_info *space_info;
  2719. WARN_ON(num_bytes < root->sectorsize);
  2720. btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
  2721. ins->objectid = 0;
  2722. ins->offset = 0;
  2723. if (orig_root->ref_cows || empty_size)
  2724. allowed_chunk_alloc = 1;
  2725. if (data & BTRFS_BLOCK_GROUP_METADATA) {
  2726. last_ptr = &root->fs_info->last_alloc;
  2727. if (!btrfs_test_opt(root, SSD))
  2728. empty_cluster = 64 * 1024;
  2729. }
  2730. if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
  2731. last_ptr = &root->fs_info->last_data_alloc;
  2732. if (last_ptr) {
  2733. if (*last_ptr) {
  2734. hint_byte = *last_ptr;
  2735. last_wanted = *last_ptr;
  2736. } else
  2737. empty_size += empty_cluster;
  2738. } else {
  2739. empty_cluster = 0;
  2740. }
  2741. search_start = max(search_start, first_logical_byte(root, 0));
  2742. search_start = max(search_start, hint_byte);
  2743. if (last_wanted && search_start != last_wanted) {
  2744. last_wanted = 0;
  2745. empty_size += empty_cluster;
  2746. }
  2747. total_needed += empty_size;
  2748. block_group = btrfs_lookup_block_group(root->fs_info, search_start);
  2749. if (!block_group)
  2750. block_group = btrfs_lookup_first_block_group(root->fs_info,
  2751. search_start);
  2752. space_info = __find_space_info(root->fs_info, data);
  2753. down_read(&space_info->groups_sem);
  2754. while (1) {
  2755. struct btrfs_free_space *free_space;
  2756. /*
2757. * the only way this happens is if our hint points to a block
2758. * group that's not of the proper type; while looping this
2759. * should never happen
  2760. */
  2761. if (empty_size)
  2762. extra_loop = 1;
  2763. if (!block_group)
  2764. goto new_group_no_lock;
  2765. if (unlikely(!block_group->cached)) {
  2766. mutex_lock(&block_group->cache_mutex);
  2767. ret = cache_block_group(root, block_group);
  2768. mutex_unlock(&block_group->cache_mutex);
  2769. if (ret)
  2770. break;
  2771. }
  2772. mutex_lock(&block_group->alloc_mutex);
  2773. if (unlikely(!block_group_bits(block_group, data)))
  2774. goto new_group;
  2775. if (unlikely(block_group->ro))
  2776. goto new_group;
  2777. free_space = btrfs_find_free_space(block_group, search_start,
  2778. total_needed);
  2779. if (free_space) {
  2780. u64 start = block_group->key.objectid;
  2781. u64 end = block_group->key.objectid +
  2782. block_group->key.offset;
  2783. search_start = stripe_align(root, free_space->offset);
  2784. /* move on to the next group */
  2785. if (search_start + num_bytes >= search_end)
  2786. goto new_group;
  2787. /* move on to the next group */
  2788. if (search_start + num_bytes > end)
  2789. goto new_group;
  2790. if (last_wanted && search_start != last_wanted) {
  2791. total_needed += empty_cluster;
  2792. empty_size += empty_cluster;
  2793. last_wanted = 0;
  2794. /*
  2795. * if search_start is still in this block group
  2796. * then we just re-search this block group
  2797. */
  2798. if (search_start >= start &&
  2799. search_start < end) {
  2800. mutex_unlock(&block_group->alloc_mutex);
  2801. continue;
  2802. }
  2803. /* else we go to the next block group */
  2804. goto new_group;
  2805. }
  2806. if (exclude_nr > 0 &&
  2807. (search_start + num_bytes > exclude_start &&
  2808. search_start < exclude_start + exclude_nr)) {
  2809. search_start = exclude_start + exclude_nr;
  2810. /*
  2811. * if search_start is still in this block group
  2812. * then we just re-search this block group
  2813. */
  2814. if (search_start >= start &&
  2815. search_start < end) {
  2816. mutex_unlock(&block_group->alloc_mutex);
  2817. last_wanted = 0;
  2818. continue;
  2819. }
  2820. /* else we go to the next block group */
  2821. goto new_group;
  2822. }
  2823. ins->objectid = search_start;
  2824. ins->offset = num_bytes;
  2825. btrfs_remove_free_space_lock(block_group, search_start,
  2826. num_bytes);
2827. /* we are all good, let's return */
  2828. mutex_unlock(&block_group->alloc_mutex);
  2829. break;
  2830. }
  2831. new_group:
  2832. mutex_unlock(&block_group->alloc_mutex);
  2833. put_block_group(block_group);
  2834. block_group = NULL;
  2835. new_group_no_lock:
  2836. /* don't try to compare new allocations against the
  2837. * last allocation any more
  2838. */
  2839. last_wanted = 0;
  2840. /*
  2841. * Here's how this works.
  2842. * loop == 0: we were searching a block group via a hint
  2843. * and didn't find anything, so we start at
  2844. * the head of the block groups and keep searching
  2845. * loop == 1: we're searching through all of the block groups
  2846. * if we hit the head again we have searched
  2847. * all of the block groups for this space and we
2848. * need to try and allocate; if we can't, error out.
  2849. * loop == 2: we allocated more space and are looping through
  2850. * all of the block groups again.
  2851. */
  2852. if (loop == 0) {
  2853. head = &space_info->block_groups;
  2854. cur = head->next;
  2855. loop++;
  2856. } else if (loop == 1 && cur == head) {
  2857. int keep_going;
  2858. /* at this point we give up on the empty_size
  2859. * allocations and just try to allocate the min
  2860. * space.
  2861. *
  2862. * The extra_loop field was set if an empty_size
2863. * allocation was attempted above, and if it is
2864. * set we need to try the loop again without
  2865. * the additional empty_size.
  2866. */
  2867. total_needed -= empty_size;
  2868. empty_size = 0;
  2869. keep_going = extra_loop;
  2870. loop++;
  2871. if (allowed_chunk_alloc && !chunk_alloc_done) {
  2872. up_read(&space_info->groups_sem);
  2873. ret = do_chunk_alloc(trans, root, num_bytes +
  2874. 2 * 1024 * 1024, data, 1);
  2875. down_read(&space_info->groups_sem);
  2876. if (ret < 0)
  2877. goto loop_check;
  2878. head = &space_info->block_groups;
  2879. /*
  2880. * we've allocated a new chunk, keep
  2881. * trying
  2882. */
  2883. keep_going = 1;
  2884. chunk_alloc_done = 1;
  2885. } else if (!allowed_chunk_alloc) {
  2886. space_info->force_alloc = 1;
  2887. }
  2888. loop_check:
  2889. if (keep_going) {
  2890. cur = head->next;
  2891. extra_loop = 0;
  2892. } else {
  2893. break;
  2894. }
  2895. } else if (cur == head) {
  2896. break;
  2897. }
  2898. block_group = list_entry(cur, struct btrfs_block_group_cache,
  2899. list);
  2900. atomic_inc(&block_group->count);
  2901. search_start = block_group->key.objectid;
  2902. cur = cur->next;
  2903. }
  2904. /* we found what we needed */
  2905. if (ins->objectid) {
  2906. if (!(data & BTRFS_BLOCK_GROUP_DATA))
  2907. trans->block_group = block_group->key.objectid;
  2908. if (last_ptr)
  2909. *last_ptr = ins->objectid + ins->offset;
  2910. ret = 0;
  2911. } else if (!ret) {
  2912. printk(KERN_ERR "btrfs searching for %llu bytes, "
  2913. "num_bytes %llu, loop %d, allowed_alloc %d\n",
  2914. (unsigned long long)total_needed,
  2915. (unsigned long long)num_bytes,
  2916. loop, allowed_chunk_alloc);
  2917. ret = -ENOSPC;
  2918. }
  2919. if (block_group)
  2920. put_block_group(block_group);
  2921. up_read(&space_info->groups_sem);
  2922. return ret;
  2923. }
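/*
 * debugging helper: dump the usage totals for a space_info and the free
 * space state of every block group in it
 */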
  2924. static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
  2925. {
  2926. struct btrfs_block_group_cache *cache;
  2927. printk(KERN_INFO "space_info has %llu free, is %sfull\n",
  2928. (unsigned long long)(info->total_bytes - info->bytes_used -
  2929. info->bytes_pinned - info->bytes_reserved),
  2930. (info->full) ? "" : "not ");
  2931. printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
  2932. " may_use=%llu, used=%llu\n", info->total_bytes,
  2933. info->bytes_pinned, info->bytes_delalloc, info->bytes_may_use,
  2934. info->bytes_used);
  2935. down_read(&info->groups_sem);
  2936. list_for_each_entry(cache, &info->block_groups, list) {
  2937. spin_lock(&cache->lock);
  2938. printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
  2939. "%llu pinned %llu reserved\n",
  2940. (unsigned long long)cache->key.objectid,
  2941. (unsigned long long)cache->key.offset,
  2942. (unsigned long long)btrfs_block_group_used(&cache->item),
  2943. (unsigned long long)cache->pinned,
  2944. (unsigned long long)cache->reserved);
  2945. btrfs_dump_free_space(cache, bytes);
  2946. spin_unlock(&cache->lock);
  2947. }
  2948. up_read(&info->groups_sem);
  2949. }
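/*
 * reserve an extent of at least min_alloc_size bytes. New chunks may be
 * allocated to satisfy the request, and on -ENOSPC the requested size is
 * halved (never below min_alloc_size) before retrying.
 */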
  2950. static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
  2951. struct btrfs_root *root,
  2952. u64 num_bytes, u64 min_alloc_size,
  2953. u64 empty_size, u64 hint_byte,
  2954. u64 search_end, struct btrfs_key *ins,
  2955. u64 data)
  2956. {
  2957. int ret;
  2958. u64 search_start = 0;
  2959. struct btrfs_fs_info *info = root->fs_info;
  2960. data = btrfs_get_alloc_profile(root, data);
  2961. again:
  2962. /*
  2963. * the only place that sets empty_size is btrfs_realloc_node, which
  2964. * is not called recursively on allocations
  2965. */
  2966. if (empty_size || root->ref_cows) {
  2967. if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
  2968. ret = do_chunk_alloc(trans, root->fs_info->extent_root,
  2969. 2 * 1024 * 1024,
  2970. BTRFS_BLOCK_GROUP_METADATA |
  2971. (info->metadata_alloc_profile &
  2972. info->avail_metadata_alloc_bits), 0);
  2973. }
  2974. ret = do_chunk_alloc(trans, root->fs_info->extent_root,
  2975. num_bytes + 2 * 1024 * 1024, data, 0);
  2976. }
  2977. WARN_ON(num_bytes < root->sectorsize);
  2978. ret = find_free_extent(trans, root, num_bytes, empty_size,
  2979. search_start, search_end, hint_byte, ins,
  2980. trans->alloc_exclude_start,
  2981. trans->alloc_exclude_nr, data);
  2982. if (ret == -ENOSPC && num_bytes > min_alloc_size) {
  2983. num_bytes = num_bytes >> 1;
  2984. num_bytes = num_bytes & ~(root->sectorsize - 1);
  2985. num_bytes = max(num_bytes, min_alloc_size);
  2986. do_chunk_alloc(trans, root->fs_info->extent_root,
  2987. num_bytes, data, 1);
  2988. goto again;
  2989. }
  2990. if (ret) {
  2991. struct btrfs_space_info *sinfo;
  2992. sinfo = __find_space_info(root->fs_info, data);
  2993. printk(KERN_ERR "btrfs allocation failed flags %llu, "
  2994. "wanted %llu\n", (unsigned long long)data,
  2995. (unsigned long long)num_bytes);
  2996. dump_space_info(sinfo, num_bytes);
  2997. BUG();
  2998. }
  2999. return ret;
  3000. }
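/*
 * give back an extent that was reserved but never used: discard it,
 * return the range to the block group's free space cache and drop the
 * reserved byte accounting
 */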
  3001. int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
  3002. {
  3003. struct btrfs_block_group_cache *cache;
  3004. int ret = 0;
  3005. cache = btrfs_lookup_block_group(root->fs_info, start);
  3006. if (!cache) {
  3007. printk(KERN_ERR "Unable to find block group for %llu\n",
  3008. (unsigned long long)start);
  3009. return -ENOSPC;
  3010. }
  3011. ret = btrfs_discard_extent(root, start, len);
  3012. btrfs_add_free_space(cache, start, len);
  3013. put_block_group(cache);
  3014. update_reserved_extents(root, start, len, 0);
  3015. return ret;
  3016. }
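/*
 * reserve an extent for an upcoming allocation and mark the range as
 * reserved in the block group accounting
 */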
  3017. int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
  3018. struct btrfs_root *root,
  3019. u64 num_bytes, u64 min_alloc_size,
  3020. u64 empty_size, u64 hint_byte,
  3021. u64 search_end, struct btrfs_key *ins,
  3022. u64 data)
  3023. {
  3024. int ret;
  3025. ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
  3026. empty_size, hint_byte, search_end, ins,
  3027. data);
  3028. update_reserved_extents(root, ins->objectid, ins->offset, 1);
  3029. return ret;
  3030. }
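/*
 * insert the extent item and its first backref for a reserved extent,
 * updating the super block, root item and block group accounting. For
 * the extent root itself the insert is deferred via a pending_extent_op.
 */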
  3031. static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
  3032. struct btrfs_root *root, u64 parent,
  3033. u64 root_objectid, u64 ref_generation,
  3034. u64 owner, struct btrfs_key *ins)
  3035. {
  3036. int ret;
  3037. int pending_ret;
  3038. u64 super_used;
  3039. u64 root_used;
  3040. u64 num_bytes = ins->offset;
  3041. u32 sizes[2];
  3042. struct btrfs_fs_info *info = root->fs_info;
  3043. struct btrfs_root *extent_root = info->extent_root;
  3044. struct btrfs_extent_item *extent_item;
  3045. struct btrfs_extent_ref *ref;
  3046. struct btrfs_path *path;
  3047. struct btrfs_key keys[2];
  3048. if (parent == 0)
  3049. parent = ins->objectid;
  3050. /* block accounting for super block */
  3051. spin_lock(&info->delalloc_lock);
  3052. super_used = btrfs_super_bytes_used(&info->super_copy);
  3053. btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
  3054. /* block accounting for root item */
  3055. root_used = btrfs_root_used(&root->root_item);
  3056. btrfs_set_root_used(&root->root_item, root_used + num_bytes);
  3057. spin_unlock(&info->delalloc_lock);
  3058. if (root == extent_root) {
  3059. struct pending_extent_op *extent_op;
  3060. extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
  3061. BUG_ON(!extent_op);
  3062. extent_op->type = PENDING_EXTENT_INSERT;
  3063. extent_op->bytenr = ins->objectid;
  3064. extent_op->num_bytes = ins->offset;
  3065. extent_op->parent = parent;
  3066. extent_op->orig_parent = 0;
  3067. extent_op->generation = ref_generation;
  3068. extent_op->orig_generation = 0;
  3069. extent_op->level = (int)owner;
  3070. INIT_LIST_HEAD(&extent_op->list);
  3071. extent_op->del = 0;
  3072. mutex_lock(&root->fs_info->extent_ins_mutex);
  3073. set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
  3074. ins->objectid + ins->offset - 1,
  3075. EXTENT_WRITEBACK, GFP_NOFS);
  3076. set_state_private(&root->fs_info->extent_ins,
  3077. ins->objectid, (unsigned long)extent_op);
  3078. mutex_unlock(&root->fs_info->extent_ins_mutex);
  3079. goto update_block;
  3080. }
  3081. memcpy(&keys[0], ins, sizeof(*ins));
  3082. keys[1].objectid = ins->objectid;
  3083. keys[1].type = BTRFS_EXTENT_REF_KEY;
  3084. keys[1].offset = parent;
  3085. sizes[0] = sizeof(*extent_item);
  3086. sizes[1] = sizeof(*ref);
  3087. path = btrfs_alloc_path();
  3088. BUG_ON(!path);
  3089. ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
  3090. sizes, 2);
  3091. BUG_ON(ret);
  3092. extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
  3093. struct btrfs_extent_item);
  3094. btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
  3095. ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
  3096. struct btrfs_extent_ref);
  3097. btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
  3098. btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
  3099. btrfs_set_ref_objectid(path->nodes[0], ref, owner);
  3100. btrfs_set_ref_num_refs(path->nodes[0], ref, 1);
  3101. btrfs_mark_buffer_dirty(path->nodes[0]);
  3102. trans->alloc_exclude_start = 0;
  3103. trans->alloc_exclude_nr = 0;
  3104. btrfs_free_path(path);
  3105. finish_current_insert(trans, extent_root, 0);
  3106. pending_ret = del_pending_extents(trans, extent_root, 0);
  3107. if (ret)
  3108. goto out;
  3109. if (pending_ret) {
  3110. ret = pending_ret;
  3111. goto out;
  3112. }
  3113. update_block:
  3114. ret = update_block_group(trans, root, ins->objectid,
  3115. ins->offset, 1, 0);
  3116. if (ret) {
  3117. printk(KERN_ERR "btrfs update block group failed for %llu "
  3118. "%llu\n", (unsigned long long)ins->objectid,
  3119. (unsigned long long)ins->offset);
  3120. BUG();
  3121. }
  3122. out:
  3123. return ret;
  3124. }
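/*
 * record a reserved extent as fully allocated. Extents owned by the
 * tree log are skipped here.
 */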
  3125. int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
  3126. struct btrfs_root *root, u64 parent,
  3127. u64 root_objectid, u64 ref_generation,
  3128. u64 owner, struct btrfs_key *ins)
  3129. {
  3130. int ret;
  3131. if (root_objectid == BTRFS_TREE_LOG_OBJECTID)
  3132. return 0;
  3133. ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
  3134. ref_generation, owner, ins);
  3135. update_reserved_extents(root, ins->objectid, ins->offset, 0);
  3136. return ret;
  3137. }
  3138. /*
  3139. * this is used by the tree logging recovery code. It records that
  3140. * an extent has been allocated and makes sure to clear the free
  3141. * space cache bits as well
  3142. */
  3143. int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
  3144. struct btrfs_root *root, u64 parent,
  3145. u64 root_objectid, u64 ref_generation,
  3146. u64 owner, struct btrfs_key *ins)
  3147. {
  3148. int ret;
  3149. struct btrfs_block_group_cache *block_group;
  3150. block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
  3151. mutex_lock(&block_group->cache_mutex);
  3152. cache_block_group(root, block_group);
  3153. mutex_unlock(&block_group->cache_mutex);
  3154. ret = btrfs_remove_free_space(block_group, ins->objectid,
  3155. ins->offset);
  3156. BUG_ON(ret);
  3157. put_block_group(block_group);
  3158. ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
  3159. ref_generation, owner, ins);
  3160. return ret;
  3161. }
  3162. /*
3163. * finds a free extent and does all the dirty work required for
3164. * allocation. The key for the allocated extent is returned through
3165. * ins.
  3166. *
  3167. * returns 0 if everything worked, non-zero otherwise.
  3168. */
  3169. int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
  3170. struct btrfs_root *root,
  3171. u64 num_bytes, u64 parent, u64 min_alloc_size,
  3172. u64 root_objectid, u64 ref_generation,
  3173. u64 owner_objectid, u64 empty_size, u64 hint_byte,
  3174. u64 search_end, struct btrfs_key *ins, u64 data)
  3175. {
  3176. int ret;
  3177. ret = __btrfs_reserve_extent(trans, root, num_bytes,
  3178. min_alloc_size, empty_size, hint_byte,
  3179. search_end, ins, data);
  3180. BUG_ON(ret);
  3181. if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
  3182. ret = __btrfs_alloc_reserved_extent(trans, root, parent,
  3183. root_objectid, ref_generation,
  3184. owner_objectid, ins);
  3185. BUG_ON(ret);
  3186. } else {
  3187. update_reserved_extents(root, ins->objectid, ins->offset, 1);
  3188. }
  3189. return ret;
  3190. }
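/*
 * init a newly allocated tree block: set its generation, lock it, clear
 * any stale contents and mark it dirty in the appropriate io tree
 */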
  3191. struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
  3192. struct btrfs_root *root,
  3193. u64 bytenr, u32 blocksize,
  3194. int level)
  3195. {
  3196. struct extent_buffer *buf;
  3197. buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
  3198. if (!buf)
  3199. return ERR_PTR(-ENOMEM);
  3200. btrfs_set_header_generation(buf, trans->transid);
  3201. btrfs_set_buffer_lockdep_class(buf, level);
  3202. btrfs_tree_lock(buf);
  3203. clean_tree_block(trans, root, buf);
  3204. btrfs_set_lock_blocking(buf);
  3205. btrfs_set_buffer_uptodate(buf);
  3206. if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
  3207. set_extent_dirty(&root->dirty_log_pages, buf->start,
  3208. buf->start + buf->len - 1, GFP_NOFS);
  3209. } else {
  3210. set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
  3211. buf->start + buf->len - 1, GFP_NOFS);
  3212. }
  3213. trans->blocks_used++;
  3214. /* this returns a buffer locked for blocking */
  3215. return buf;
  3216. }
  3217. /*
  3218. * helper function to allocate a block for a given tree
3219. * returns the tree buffer or an ERR_PTR on failure.
  3220. */
  3221. struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
  3222. struct btrfs_root *root,
  3223. u32 blocksize, u64 parent,
  3224. u64 root_objectid,
  3225. u64 ref_generation,
  3226. int level,
  3227. u64 hint,
  3228. u64 empty_size)
  3229. {
  3230. struct btrfs_key ins;
  3231. int ret;
  3232. struct extent_buffer *buf;
  3233. ret = btrfs_alloc_extent(trans, root, blocksize, parent, blocksize,
  3234. root_objectid, ref_generation, level,
  3235. empty_size, hint, (u64)-1, &ins, 0);
  3236. if (ret) {
  3237. BUG_ON(ret > 0);
  3238. return ERR_PTR(ret);
  3239. }
  3240. buf = btrfs_init_new_buffer(trans, root, ins.objectid,
  3241. blocksize, level);
  3242. return buf;
  3243. }
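/*
 * drop the references held by all the file extents in a leaf. The
 * extents are sorted by bytenr first so the frees walk the extent
 * allocation tree in order.
 */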
  3244. int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
  3245. struct btrfs_root *root, struct extent_buffer *leaf)
  3246. {
  3247. u64 leaf_owner;
  3248. u64 leaf_generation;
  3249. struct refsort *sorted;
  3250. struct btrfs_key key;
  3251. struct btrfs_file_extent_item *fi;
  3252. int i;
  3253. int nritems;
  3254. int ret;
  3255. int refi = 0;
  3256. int slot;
  3257. BUG_ON(!btrfs_is_leaf(leaf));
  3258. nritems = btrfs_header_nritems(leaf);
  3259. leaf_owner = btrfs_header_owner(leaf);
  3260. leaf_generation = btrfs_header_generation(leaf);
  3261. sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
  3262. /* we do this loop twice. The first time we build a list
  3263. * of the extents we have a reference on, then we sort the list
  3264. * by bytenr. The second time around we actually do the
  3265. * extent freeing.
  3266. */
  3267. for (i = 0; i < nritems; i++) {
  3268. u64 disk_bytenr;
  3269. cond_resched();
  3270. btrfs_item_key_to_cpu(leaf, &key, i);
  3271. /* only extents have references, skip everything else */
  3272. if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
  3273. continue;
  3274. fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
  3275. /* inline extents live in the btree, they don't have refs */
  3276. if (btrfs_file_extent_type(leaf, fi) ==
  3277. BTRFS_FILE_EXTENT_INLINE)
  3278. continue;
  3279. disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
  3280. /* holes don't have refs */
  3281. if (disk_bytenr == 0)
  3282. continue;
  3283. sorted[refi].bytenr = disk_bytenr;
  3284. sorted[refi].slot = i;
  3285. refi++;
  3286. }
  3287. if (refi == 0)
  3288. goto out;
  3289. sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
  3290. for (i = 0; i < refi; i++) {
  3291. u64 disk_bytenr;
  3292. disk_bytenr = sorted[i].bytenr;
  3293. slot = sorted[i].slot;
  3294. cond_resched();
  3295. btrfs_item_key_to_cpu(leaf, &key, slot);
  3296. if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
  3297. continue;
  3298. fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
  3299. ret = __btrfs_free_extent(trans, root, disk_bytenr,
  3300. btrfs_file_extent_disk_num_bytes(leaf, fi),
  3301. leaf->start, leaf_owner, leaf_generation,
  3302. key.objectid, 0);
  3303. BUG_ON(ret);
  3304. atomic_inc(&root->fs_info->throttle_gen);
  3305. wake_up(&root->fs_info->transaction_throttle);
  3306. cond_resched();
  3307. }
  3308. out:
  3309. kfree(sorted);
  3310. return 0;
  3311. }
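/*
 * same as btrfs_drop_leaf_ref, but works from a cached leaf ref so the
 * leaf itself never has to be read from disk
 */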
  3312. static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
  3313. struct btrfs_root *root,
  3314. struct btrfs_leaf_ref *ref)
  3315. {
  3316. int i;
  3317. int ret;
  3318. struct btrfs_extent_info *info;
  3319. struct refsort *sorted;
  3320. if (ref->nritems == 0)
  3321. return 0;
  3322. sorted = kmalloc(sizeof(*sorted) * ref->nritems, GFP_NOFS);
  3323. for (i = 0; i < ref->nritems; i++) {
  3324. sorted[i].bytenr = ref->extents[i].bytenr;
  3325. sorted[i].slot = i;
  3326. }
  3327. sort(sorted, ref->nritems, sizeof(struct refsort), refsort_cmp, NULL);
  3328. /*
  3329. * the items in the ref were sorted when the ref was inserted
  3330. * into the ref cache, so this is already in order
  3331. */
  3332. for (i = 0; i < ref->nritems; i++) {
  3333. info = ref->extents + sorted[i].slot;
  3334. ret = __btrfs_free_extent(trans, root, info->bytenr,
  3335. info->num_bytes, ref->bytenr,
  3336. ref->owner, ref->generation,
  3337. info->objectid, 0);
  3338. atomic_inc(&root->fs_info->throttle_gen);
  3339. wake_up(&root->fs_info->transaction_throttle);
  3340. cond_resched();
  3341. BUG_ON(ret);
  3342. info++;
  3343. }
  3344. kfree(sorted);
  3345. return 0;
  3346. }
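/*
 * look up the reference count on an extent while dropping a snapshot,
 * with some optional debugging for suspicious counts
 */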
  3347. static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start,
  3348. u64 len, u32 *refs)
  3349. {
  3350. int ret;
  3351. ret = btrfs_lookup_extent_ref(NULL, root, start, len, refs);
  3352. BUG_ON(ret);
  3353. #if 0 /* some debugging code in case we see problems here */
  3354. /* if the refs count is one, it won't get increased again. But
  3355. * if the ref count is > 1, someone may be decreasing it at
  3356. * the same time we are.
  3357. */
  3358. if (*refs != 1) {
  3359. struct extent_buffer *eb = NULL;
  3360. eb = btrfs_find_create_tree_block(root, start, len);
  3361. if (eb)
  3362. btrfs_tree_lock(eb);
  3363. mutex_lock(&root->fs_info->alloc_mutex);
  3364. ret = lookup_extent_ref(NULL, root, start, len, refs);
  3365. BUG_ON(ret);
  3366. mutex_unlock(&root->fs_info->alloc_mutex);
  3367. if (eb) {
  3368. btrfs_tree_unlock(eb);
  3369. free_extent_buffer(eb);
  3370. }
  3371. if (*refs == 1) {
  3372. printk(KERN_ERR "btrfs block %llu went down to one "
  3373. "during drop_snap\n", (unsigned long long)start);
  3374. }
  3375. }
  3376. #endif
  3377. cond_resched();
  3378. return ret;
  3379. }
  3380. /*
  3381. * this is used while deleting old snapshots, and it drops the refs
  3382. * on a whole subtree starting from a level 1 node.
  3383. *
  3384. * The idea is to sort all the leaf pointers, and then drop the
  3385. * ref on all the leaves in order. Most of the time the leaves
  3386. * will have ref cache entries, so no leaf IOs will be required to
  3387. * find the extents they have references on.
  3388. *
  3389. * For each leaf, any references it has are also dropped in order
  3390. *
  3391. * This ends up dropping the references in something close to optimal
  3392. * order for reading and modifying the extent allocation tree.
  3393. */
  3394. static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
  3395. struct btrfs_root *root,
  3396. struct btrfs_path *path)
  3397. {
  3398. u64 bytenr;
  3399. u64 root_owner;
  3400. u64 root_gen;
  3401. struct extent_buffer *eb = path->nodes[1];
  3402. struct extent_buffer *leaf;
  3403. struct btrfs_leaf_ref *ref;
  3404. struct refsort *sorted = NULL;
  3405. int nritems = btrfs_header_nritems(eb);
  3406. int ret;
  3407. int i;
  3408. int refi = 0;
  3409. int slot = path->slots[1];
  3410. u32 blocksize = btrfs_level_size(root, 0);
  3411. u32 refs;
  3412. if (nritems == 0)
  3413. goto out;
  3414. root_owner = btrfs_header_owner(eb);
  3415. root_gen = btrfs_header_generation(eb);
  3416. sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
  3417. /*
  3418. * step one, sort all the leaf pointers so we don't scribble
  3419. * randomly into the extent allocation tree
  3420. */
  3421. for (i = slot; i < nritems; i++) {
  3422. sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
  3423. sorted[refi].slot = i;
  3424. refi++;
  3425. }
  3426. /*
  3427. * nritems won't be zero, but if we're picking up drop_snapshot
  3428. * after a crash, slot might be > 0, so double check things
  3429. * just in case.
  3430. */
  3431. if (refi == 0)
  3432. goto out;
  3433. sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
  3434. /*
  3435. * the first loop frees everything the leaves point to
  3436. */
  3437. for (i = 0; i < refi; i++) {
  3438. u64 ptr_gen;
  3439. bytenr = sorted[i].bytenr;
  3440. /*
  3441. * check the reference count on this leaf. If it is > 1
  3442. * we just decrement it below and don't update any
  3443. * of the refs the leaf points to.
  3444. */
  3445. ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
  3446. BUG_ON(ret);
  3447. if (refs != 1)
  3448. continue;
  3449. ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);
  3450. /*
  3451. * the leaf only had one reference, which means the
  3452. * only thing pointing to this leaf is the snapshot
  3453. * we're deleting. It isn't possible for the reference
  3454. * count to increase again later
  3455. *
  3456. * The reference cache is checked for the leaf,
  3457. * and if found we'll be able to drop any refs held by
  3458. * the leaf without needing to read it in.
  3459. */
  3460. ref = btrfs_lookup_leaf_ref(root, bytenr);
  3461. if (ref && ref->generation != ptr_gen) {
  3462. btrfs_free_leaf_ref(root, ref);
  3463. ref = NULL;
  3464. }
  3465. if (ref) {
  3466. ret = cache_drop_leaf_ref(trans, root, ref);
  3467. BUG_ON(ret);
  3468. btrfs_remove_leaf_ref(root, ref);
  3469. btrfs_free_leaf_ref(root, ref);
  3470. } else {
  3471. /*
  3472. * the leaf wasn't in the reference cache, so
  3473. * we have to read it.
  3474. */
  3475. leaf = read_tree_block(root, bytenr, blocksize,
  3476. ptr_gen);
  3477. ret = btrfs_drop_leaf_ref(trans, root, leaf);
  3478. BUG_ON(ret);
  3479. free_extent_buffer(leaf);
  3480. }
  3481. atomic_inc(&root->fs_info->throttle_gen);
  3482. wake_up(&root->fs_info->transaction_throttle);
  3483. cond_resched();
  3484. }
  3485. /*
  3486. * run through the loop again to free the refs on the leaves.
  3487. * This is faster than doing it in the loop above because
  3488. * the leaves are likely to be clustered together. We end up
  3489. * working in nice chunks on the extent allocation tree.
  3490. */
  3491. for (i = 0; i < refi; i++) {
  3492. bytenr = sorted[i].bytenr;
  3493. ret = __btrfs_free_extent(trans, root, bytenr,
  3494. blocksize, eb->start,
  3495. root_owner, root_gen, 0, 1);
  3496. BUG_ON(ret);
  3497. atomic_inc(&root->fs_info->throttle_gen);
  3498. wake_up(&root->fs_info->transaction_throttle);
  3499. cond_resched();
  3500. }
  3501. out:
  3502. kfree(sorted);
  3503. /*
  3504. * update the path to show we've processed the entire level 1
  3505. * node. This will get saved into the root's drop_snapshot_progress
  3506. * field so these drops are not repeated again if this transaction
  3507. * commits.
  3508. */
  3509. path->slots[1] = nritems;
  3510. return 0;
  3511. }
  3512. /*
  3513. * helper function for drop_snapshot, this walks down the tree dropping ref
  3514. * counts as it goes.
  3515. */
  3516. static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
  3517. struct btrfs_root *root,
  3518. struct btrfs_path *path, int *level)
  3519. {
  3520. u64 root_owner;
  3521. u64 root_gen;
  3522. u64 bytenr;
  3523. u64 ptr_gen;
  3524. struct extent_buffer *next;
  3525. struct extent_buffer *cur;
  3526. struct extent_buffer *parent;
  3527. u32 blocksize;
  3528. int ret;
  3529. u32 refs;
  3530. WARN_ON(*level < 0);
  3531. WARN_ON(*level >= BTRFS_MAX_LEVEL);
  3532. ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
  3533. path->nodes[*level]->len, &refs);
  3534. BUG_ON(ret);
  3535. if (refs > 1)
  3536. goto out;
  3537. /*
  3538. * walk down to the last node level and free all the leaves
  3539. */
  3540. while (*level >= 0) {
  3541. WARN_ON(*level < 0);
  3542. WARN_ON(*level >= BTRFS_MAX_LEVEL);
  3543. cur = path->nodes[*level];
  3544. if (btrfs_header_level(cur) != *level)
  3545. WARN_ON(1);
  3546. if (path->slots[*level] >=
  3547. btrfs_header_nritems(cur))
  3548. break;
  3549. /* the new code goes down to level 1 and does all the
  3550. * leaves pointed to that node in bulk. So, this check
  3551. * for level 0 will always be false.
  3552. *
  3553. * But, the disk format allows the drop_snapshot_progress
  3554. * field in the root to leave things in a state where
  3555. * a leaf will need cleaning up here. If someone crashes
  3556. * with the old code and then boots with the new code,
  3557. * we might find a leaf here.
  3558. */
  3559. if (*level == 0) {
  3560. ret = btrfs_drop_leaf_ref(trans, root, cur);
  3561. BUG_ON(ret);
  3562. break;
  3563. }
  3564. /*
  3565. * once we get to level one, process the whole node
  3566. * at once, including everything below it.
  3567. */
  3568. if (*level == 1) {
  3569. ret = drop_level_one_refs(trans, root, path);
  3570. BUG_ON(ret);
  3571. break;
  3572. }
  3573. bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
  3574. ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
  3575. blocksize = btrfs_level_size(root, *level - 1);
  3576. ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
  3577. BUG_ON(ret);
  3578. /*
  3579. * if there is more than one reference, we don't need
  3580. * to read that node to drop any references it has. We
  3581. * just drop the ref we hold on that node and move on to the
  3582. * next slot in this level.
  3583. */
  3584. if (refs != 1) {
  3585. parent = path->nodes[*level];
  3586. root_owner = btrfs_header_owner(parent);
  3587. root_gen = btrfs_header_generation(parent);
  3588. path->slots[*level]++;
  3589. ret = __btrfs_free_extent(trans, root, bytenr,
  3590. blocksize, parent->start,
  3591. root_owner, root_gen,
  3592. *level - 1, 1);
  3593. BUG_ON(ret);
  3594. atomic_inc(&root->fs_info->throttle_gen);
  3595. wake_up(&root->fs_info->transaction_throttle);
  3596. cond_resched();
  3597. continue;
  3598. }
  3599. /*
  3600. * we need to keep freeing things in the next level down.
  3601. * read the block and loop around to process it
  3602. */
  3603. next = read_tree_block(root, bytenr, blocksize, ptr_gen);
  3604. WARN_ON(*level <= 0);
  3605. if (path->nodes[*level-1])
  3606. free_extent_buffer(path->nodes[*level-1]);
  3607. path->nodes[*level-1] = next;
  3608. *level = btrfs_header_level(next);
  3609. path->slots[*level] = 0;
  3610. cond_resched();
  3611. }
  3612. out:
  3613. WARN_ON(*level < 0);
  3614. WARN_ON(*level >= BTRFS_MAX_LEVEL);
  3615. if (path->nodes[*level] == root->node) {
  3616. parent = path->nodes[*level];
  3617. bytenr = path->nodes[*level]->start;
  3618. } else {
  3619. parent = path->nodes[*level + 1];
  3620. bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
  3621. }
  3622. blocksize = btrfs_level_size(root, *level);
  3623. root_owner = btrfs_header_owner(parent);
  3624. root_gen = btrfs_header_generation(parent);
  3625. /*
  3626. * cleanup and free the reference on the last node
  3627. * we processed
  3628. */
  3629. ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
  3630. parent->start, root_owner, root_gen,
  3631. *level, 1);
  3632. free_extent_buffer(path->nodes[*level]);
  3633. path->nodes[*level] = NULL;
  3634. *level += 1;
  3635. BUG_ON(ret);
  3636. cond_resched();
  3637. return 0;
  3638. }
  3639. /*
  3640. * helper function for drop_subtree, this function is similar to
  3641. * walk_down_tree. The main difference is that it checks reference
  3642. * counts while tree blocks are locked.
  3643. */
  3644. static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
  3645. struct btrfs_root *root,
  3646. struct btrfs_path *path, int *level)
  3647. {
  3648. struct extent_buffer *next;
  3649. struct extent_buffer *cur;
  3650. struct extent_buffer *parent;
  3651. u64 bytenr;
  3652. u64 ptr_gen;
  3653. u32 blocksize;
  3654. u32 refs;
  3655. int ret;
  3656. cur = path->nodes[*level];
  3657. ret = btrfs_lookup_extent_ref(trans, root, cur->start, cur->len,
  3658. &refs);
  3659. BUG_ON(ret);
  3660. if (refs > 1)
  3661. goto out;
  3662. while (*level >= 0) {
  3663. cur = path->nodes[*level];
  3664. if (*level == 0) {
  3665. ret = btrfs_drop_leaf_ref(trans, root, cur);
  3666. BUG_ON(ret);
  3667. clean_tree_block(trans, root, cur);
  3668. break;
  3669. }
  3670. if (path->slots[*level] >= btrfs_header_nritems(cur)) {
  3671. clean_tree_block(trans, root, cur);
  3672. break;
  3673. }
  3674. bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
  3675. blocksize = btrfs_level_size(root, *level - 1);
  3676. ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
  3677. next = read_tree_block(root, bytenr, blocksize, ptr_gen);
  3678. btrfs_tree_lock(next);
  3679. btrfs_set_lock_blocking(next);
  3680. ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
  3681. &refs);
  3682. BUG_ON(ret);
  3683. if (refs > 1) {
  3684. parent = path->nodes[*level];
  3685. ret = btrfs_free_extent(trans, root, bytenr,
  3686. blocksize, parent->start,
  3687. btrfs_header_owner(parent),
  3688. btrfs_header_generation(parent),
  3689. *level - 1, 1);
  3690. BUG_ON(ret);
  3691. path->slots[*level]++;
  3692. btrfs_tree_unlock(next);
  3693. free_extent_buffer(next);
  3694. continue;
  3695. }
  3696. *level = btrfs_header_level(next);
  3697. path->nodes[*level] = next;
  3698. path->slots[*level] = 0;
  3699. path->locks[*level] = 1;
  3700. cond_resched();
  3701. }
  3702. out:
  3703. parent = path->nodes[*level + 1];
  3704. bytenr = path->nodes[*level]->start;
  3705. blocksize = path->nodes[*level]->len;
  3706. ret = btrfs_free_extent(trans, root, bytenr, blocksize,
  3707. parent->start, btrfs_header_owner(parent),
  3708. btrfs_header_generation(parent), *level, 1);
  3709. BUG_ON(ret);
  3710. if (path->locks[*level]) {
  3711. btrfs_tree_unlock(path->nodes[*level]);
  3712. path->locks[*level] = 0;
  3713. }
  3714. free_extent_buffer(path->nodes[*level]);
  3715. path->nodes[*level] = NULL;
  3716. *level += 1;
  3717. cond_resched();
  3718. return 0;
  3719. }
  3720. /*
  3721. * helper for dropping snapshots. This walks back up the tree in the path
  3722. * to find the first node higher up where we haven't yet gone through
  3723. * all the slots
  3724. */
  3725. static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
  3726. struct btrfs_root *root,
  3727. struct btrfs_path *path,
  3728. int *level, int max_level)
  3729. {
  3730. u64 root_owner;
  3731. u64 root_gen;
  3732. struct btrfs_root_item *root_item = &root->root_item;
  3733. int i;
  3734. int slot;
  3735. int ret;
  3736. for (i = *level; i < max_level && path->nodes[i]; i++) {
  3737. slot = path->slots[i];
  3738. if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
  3739. struct extent_buffer *node;
  3740. struct btrfs_disk_key disk_key;
  3741. /*
  3742. * there is more work to do in this level.
  3743. * Update the drop_progress marker to reflect
  3744. * the work we've done so far, and then bump
  3745. * the slot number
  3746. */
  3747. node = path->nodes[i];
  3748. path->slots[i]++;
  3749. *level = i;
  3750. WARN_ON(*level == 0);
  3751. btrfs_node_key(node, &disk_key, path->slots[i]);
  3752. memcpy(&root_item->drop_progress,
  3753. &disk_key, sizeof(disk_key));
  3754. root_item->drop_level = i;
  3755. return 0;
  3756. } else {
  3757. struct extent_buffer *parent;
  3758. /*
  3759. * this whole node is done, free our reference
  3760. * on it and go up one level
  3761. */
  3762. if (path->nodes[*level] == root->node)
  3763. parent = path->nodes[*level];
  3764. else
  3765. parent = path->nodes[*level + 1];
  3766. root_owner = btrfs_header_owner(parent);
  3767. root_gen = btrfs_header_generation(parent);
  3768. clean_tree_block(trans, root, path->nodes[*level]);
  3769. ret = btrfs_free_extent(trans, root,
  3770. path->nodes[*level]->start,
  3771. path->nodes[*level]->len,
  3772. parent->start, root_owner,
  3773. root_gen, *level, 1);
  3774. BUG_ON(ret);
  3775. if (path->locks[*level]) {
  3776. btrfs_tree_unlock(path->nodes[*level]);
  3777. path->locks[*level] = 0;
  3778. }
  3779. free_extent_buffer(path->nodes[*level]);
  3780. path->nodes[*level] = NULL;
  3781. *level = i + 1;
  3782. }
  3783. }
  3784. return 1;
  3785. }
  3786. /*
3787. * drop the reference count on the tree rooted at 'root'. This traverses
  3788. * the tree freeing any blocks that have a ref count of zero after being
  3789. * decremented.
  3790. */
  3791. int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
  3792. *root)
  3793. {
  3794. int ret = 0;
  3795. int wret;
  3796. int level;
  3797. struct btrfs_path *path;
  3798. int i;
  3799. int orig_level;
  3800. struct btrfs_root_item *root_item = &root->root_item;
  3801. WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
  3802. path = btrfs_alloc_path();
  3803. BUG_ON(!path);
  3804. level = btrfs_header_level(root->node);
  3805. orig_level = level;
  3806. if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
  3807. path->nodes[level] = root->node;
  3808. extent_buffer_get(root->node);
  3809. path->slots[level] = 0;
  3810. } else {
  3811. struct btrfs_key key;
  3812. struct btrfs_disk_key found_key;
  3813. struct extent_buffer *node;
  3814. btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
  3815. level = root_item->drop_level;
  3816. path->lowest_level = level;
  3817. wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
  3818. if (wret < 0) {
  3819. ret = wret;
  3820. goto out;
  3821. }
  3822. node = path->nodes[level];
  3823. btrfs_node_key(node, &found_key, path->slots[level]);
  3824. WARN_ON(memcmp(&found_key, &root_item->drop_progress,
  3825. sizeof(found_key)));
  3826. /*
  3827. * unlock our path, this is safe because only this
  3828. * function is allowed to delete this snapshot
  3829. */
  3830. for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
  3831. if (path->nodes[i] && path->locks[i]) {
  3832. path->locks[i] = 0;
  3833. btrfs_tree_unlock(path->nodes[i]);
  3834. }
  3835. }
  3836. }
  3837. while (1) {
  3838. wret = walk_down_tree(trans, root, path, &level);
  3839. if (wret > 0)
  3840. break;
  3841. if (wret < 0)
  3842. ret = wret;
  3843. wret = walk_up_tree(trans, root, path, &level,
  3844. BTRFS_MAX_LEVEL);
  3845. if (wret > 0)
  3846. break;
  3847. if (wret < 0)
  3848. ret = wret;
  3849. if (trans->transaction->in_commit) {
  3850. ret = -EAGAIN;
  3851. break;
  3852. }
  3853. atomic_inc(&root->fs_info->throttle_gen);
  3854. wake_up(&root->fs_info->transaction_throttle);
  3855. }
  3856. for (i = 0; i <= orig_level; i++) {
  3857. if (path->nodes[i]) {
  3858. free_extent_buffer(path->nodes[i]);
  3859. path->nodes[i] = NULL;
  3860. }
  3861. }
  3862. out:
  3863. btrfs_free_path(path);
  3864. return ret;
  3865. }
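/*
 * drop the references on the subtree rooted at 'node'. 'parent' is the
 * block that points to 'node'; both must be locked by the caller.
 */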
  3866. int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
  3867. struct btrfs_root *root,
  3868. struct extent_buffer *node,
  3869. struct extent_buffer *parent)
  3870. {
  3871. struct btrfs_path *path;
  3872. int level;
  3873. int parent_level;
  3874. int ret = 0;
  3875. int wret;
  3876. path = btrfs_alloc_path();
  3877. BUG_ON(!path);
  3878. BUG_ON(!btrfs_tree_locked(parent));
  3879. parent_level = btrfs_header_level(parent);
  3880. extent_buffer_get(parent);
  3881. path->nodes[parent_level] = parent;
  3882. path->slots[parent_level] = btrfs_header_nritems(parent);
  3883. BUG_ON(!btrfs_tree_locked(node));
  3884. level = btrfs_header_level(node);
  3885. extent_buffer_get(node);
  3886. path->nodes[level] = node;
  3887. path->slots[level] = 0;
  3888. while (1) {
  3889. wret = walk_down_subtree(trans, root, path, &level);
  3890. if (wret < 0)
  3891. ret = wret;
  3892. if (wret != 0)
  3893. break;
  3894. wret = walk_up_tree(trans, root, path, &level, parent_level);
  3895. if (wret < 0)
  3896. ret = wret;
  3897. if (wret != 0)
  3898. break;
  3899. }
  3900. btrfs_free_path(path);
  3901. return ret;
  3902. }
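/* clamp a readahead window of nr pages so it never runs past 'last' */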
  3903. static unsigned long calc_ra(unsigned long start, unsigned long last,
  3904. unsigned long nr)
  3905. {
  3906. return min(last, start + nr - 1);
  3907. }
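/*
 * read in the pages covering [start, start + len) of the relocation
 * inode and redirty them so the data is written out again at its new
 * location
 */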
  3908. static noinline int relocate_inode_pages(struct inode *inode, u64 start,
  3909. u64 len)
  3910. {
  3911. u64 page_start;
  3912. u64 page_end;
  3913. unsigned long first_index;
  3914. unsigned long last_index;
  3915. unsigned long i;
  3916. struct page *page;
  3917. struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
  3918. struct file_ra_state *ra;
  3919. struct btrfs_ordered_extent *ordered;
  3920. unsigned int total_read = 0;
  3921. unsigned int total_dirty = 0;
  3922. int ret = 0;
  3923. ra = kzalloc(sizeof(*ra), GFP_NOFS);
  3924. mutex_lock(&inode->i_mutex);
  3925. first_index = start >> PAGE_CACHE_SHIFT;
  3926. last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
3927. /* make sure the dirty trick played by the caller works */
  3928. ret = invalidate_inode_pages2_range(inode->i_mapping,
  3929. first_index, last_index);
  3930. if (ret)
  3931. goto out_unlock;
  3932. file_ra_state_init(ra, inode->i_mapping);
  3933. for (i = first_index ; i <= last_index; i++) {
  3934. if (total_read % ra->ra_pages == 0) {
  3935. btrfs_force_ra(inode->i_mapping, ra, NULL, i,
  3936. calc_ra(i, last_index, ra->ra_pages));
  3937. }
  3938. total_read++;
  3939. again:
  3940. if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
  3941. BUG_ON(1);
  3942. page = grab_cache_page(inode->i_mapping, i);
  3943. if (!page) {
  3944. ret = -ENOMEM;
  3945. goto out_unlock;
  3946. }
  3947. if (!PageUptodate(page)) {
  3948. btrfs_readpage(NULL, page);
  3949. lock_page(page);
  3950. if (!PageUptodate(page)) {
  3951. unlock_page(page);
  3952. page_cache_release(page);
  3953. ret = -EIO;
  3954. goto out_unlock;
  3955. }
  3956. }
  3957. wait_on_page_writeback(page);
  3958. page_start = (u64)page->index << PAGE_CACHE_SHIFT;
  3959. page_end = page_start + PAGE_CACHE_SIZE - 1;
  3960. lock_extent(io_tree, page_start, page_end, GFP_NOFS);
  3961. ordered = btrfs_lookup_ordered_extent(inode, page_start);
  3962. if (ordered) {
  3963. unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
  3964. unlock_page(page);
  3965. page_cache_release(page);
  3966. btrfs_start_ordered_extent(inode, ordered, 1);
  3967. btrfs_put_ordered_extent(ordered);
  3968. goto again;
  3969. }
  3970. set_page_extent_mapped(page);
  3971. if (i == first_index)
  3972. set_extent_bits(io_tree, page_start, page_end,
  3973. EXTENT_BOUNDARY, GFP_NOFS);
  3974. btrfs_set_extent_delalloc(inode, page_start, page_end);
  3975. set_page_dirty(page);
  3976. total_dirty++;
  3977. unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
  3978. unlock_page(page);
  3979. page_cache_release(page);
  3980. }
  3981. out_unlock:
  3982. kfree(ra);
  3983. mutex_unlock(&inode->i_mutex);
  3984. balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
  3985. return ret;
  3986. }
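/*
 * insert a pinned extent mapping for the extent being relocated so that
 * btrfs_readpage finds the old data, then dirty the corresponding pages
 * of the relocation inode
 */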
  3987. static noinline int relocate_data_extent(struct inode *reloc_inode,
  3988. struct btrfs_key *extent_key,
  3989. u64 offset)
  3990. {
  3991. struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
  3992. struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
  3993. struct extent_map *em;
  3994. u64 start = extent_key->objectid - offset;
  3995. u64 end = start + extent_key->offset - 1;
  3996. em = alloc_extent_map(GFP_NOFS);
  3997. BUG_ON(!em || IS_ERR(em));
  3998. em->start = start;
  3999. em->len = extent_key->offset;
  4000. em->block_len = extent_key->offset;
  4001. em->block_start = extent_key->objectid;
  4002. em->bdev = root->fs_info->fs_devices->latest_bdev;
  4003. set_bit(EXTENT_FLAG_PINNED, &em->flags);
  4004. /* setup extent map to cheat btrfs_readpage */
  4005. lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
  4006. while (1) {
  4007. int ret;
  4008. spin_lock(&em_tree->lock);
  4009. ret = add_extent_mapping(em_tree, em);
  4010. spin_unlock(&em_tree->lock);
  4011. if (ret != -EEXIST) {
  4012. free_extent_map(em);
  4013. break;
  4014. }
  4015. btrfs_drop_extent_cache(reloc_inode, start, end, 0);
  4016. }
  4017. unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
  4018. return relocate_inode_pages(reloc_inode, start, extent_key->offset);
  4019. }
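/*
 * btrfs_ref_path records a single chain of backrefs from an extent up
 * to a tree root, one node per level
 */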
  4020. struct btrfs_ref_path {
  4021. u64 extent_start;
  4022. u64 nodes[BTRFS_MAX_LEVEL];
  4023. u64 root_objectid;
  4024. u64 root_generation;
  4025. u64 owner_objectid;
  4026. u32 num_refs;
  4027. int lowest_level;
  4028. int current_level;
  4029. int shared_level;
  4030. struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
  4031. u64 new_nodes[BTRFS_MAX_LEVEL];
  4032. };
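/* in-memory copy of the fields of a file extent item */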
  4033. struct disk_extent {
  4034. u64 ram_bytes;
  4035. u64 disk_bytenr;
  4036. u64 disk_num_bytes;
  4037. u64 offset;
  4038. u64 num_bytes;
  4039. u8 compression;
  4040. u8 encryption;
  4041. u16 other_encoding;
  4042. };
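/* true for trees that are only cowed, never snapshotted */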
  4043. static int is_cowonly_root(u64 root_objectid)
  4044. {
  4045. if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
  4046. root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
  4047. root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
  4048. root_objectid == BTRFS_DEV_TREE_OBJECTID ||
  4049. root_objectid == BTRFS_TREE_LOG_OBJECTID ||
  4050. root_objectid == BTRFS_CSUM_TREE_OBJECTID)
  4051. return 1;
  4052. return 0;
  4053. }
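/*
 * advance a btrfs_ref_path to the next chain of backrefs leading from
 * ref_path->extent_start up to a tree root. Pass first_time = 1 on the
 * initial call; later calls walk back down and then up again to find
 * the next path. Returns 0 when a path is found, 1 when there are no
 * more, and < 0 on error.
 */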
  4054. static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
  4055. struct btrfs_root *extent_root,
  4056. struct btrfs_ref_path *ref_path,
  4057. int first_time)
  4058. {
  4059. struct extent_buffer *leaf;
  4060. struct btrfs_path *path;
  4061. struct btrfs_extent_ref *ref;
  4062. struct btrfs_key key;
  4063. struct btrfs_key found_key;
  4064. u64 bytenr;
  4065. u32 nritems;
  4066. int level;
  4067. int ret = 1;
  4068. path = btrfs_alloc_path();
  4069. if (!path)
  4070. return -ENOMEM;
  4071. if (first_time) {
  4072. ref_path->lowest_level = -1;
  4073. ref_path->current_level = -1;
  4074. ref_path->shared_level = -1;
  4075. goto walk_up;
  4076. }
  4077. walk_down:
  4078. level = ref_path->current_level - 1;
  4079. while (level >= -1) {
  4080. u64 parent;
  4081. if (level < ref_path->lowest_level)
  4082. break;
  4083. if (level >= 0)
  4084. bytenr = ref_path->nodes[level];
  4085. else
  4086. bytenr = ref_path->extent_start;
  4087. BUG_ON(bytenr == 0);
  4088. parent = ref_path->nodes[level + 1];
  4089. ref_path->nodes[level + 1] = 0;
  4090. ref_path->current_level = level;
  4091. BUG_ON(parent == 0);
  4092. key.objectid = bytenr;
  4093. key.offset = parent + 1;
  4094. key.type = BTRFS_EXTENT_REF_KEY;
  4095. ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
  4096. if (ret < 0)
  4097. goto out;
  4098. BUG_ON(ret == 0);
  4099. leaf = path->nodes[0];
  4100. nritems = btrfs_header_nritems(leaf);
  4101. if (path->slots[0] >= nritems) {
  4102. ret = btrfs_next_leaf(extent_root, path);
  4103. if (ret < 0)
  4104. goto out;
  4105. if (ret > 0)
  4106. goto next;
  4107. leaf = path->nodes[0];
  4108. }
  4109. btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
  4110. if (found_key.objectid == bytenr &&
  4111. found_key.type == BTRFS_EXTENT_REF_KEY) {
  4112. if (level < ref_path->shared_level)
  4113. ref_path->shared_level = level;
  4114. goto found;
  4115. }
  4116. next:
  4117. level--;
  4118. btrfs_release_path(extent_root, path);
  4119. cond_resched();
  4120. }
  4121. /* reached lowest level */
  4122. ret = 1;
  4123. goto out;
  4124. walk_up:
  4125. level = ref_path->current_level;
  4126. while (level < BTRFS_MAX_LEVEL - 1) {
  4127. u64 ref_objectid;
  4128. if (level >= 0)
  4129. bytenr = ref_path->nodes[level];
  4130. else
  4131. bytenr = ref_path->extent_start;
  4132. BUG_ON(bytenr == 0);
  4133. key.objectid = bytenr;
  4134. key.offset = 0;
  4135. key.type = BTRFS_EXTENT_REF_KEY;
  4136. ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
  4137. if (ret < 0)
  4138. goto out;
  4139. leaf = path->nodes[0];
  4140. nritems = btrfs_header_nritems(leaf);
  4141. if (path->slots[0] >= nritems) {
  4142. ret = btrfs_next_leaf(extent_root, path);
  4143. if (ret < 0)
  4144. goto out;
  4145. if (ret > 0) {
  4146. /* the extent was freed by someone */
  4147. if (ref_path->lowest_level == level)
  4148. goto out;
  4149. btrfs_release_path(extent_root, path);
  4150. goto walk_down;
  4151. }
  4152. leaf = path->nodes[0];
  4153. }
  4154. btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
  4155. if (found_key.objectid != bytenr ||
  4156. found_key.type != BTRFS_EXTENT_REF_KEY) {
  4157. /* the extent was freed by someone */
  4158. if (ref_path->lowest_level == level) {
  4159. ret = 1;
  4160. goto out;
  4161. }
  4162. btrfs_release_path(extent_root, path);
  4163. goto walk_down;
  4164. }
  4165. found:
  4166. ref = btrfs_item_ptr(leaf, path->slots[0],
  4167. struct btrfs_extent_ref);
  4168. ref_objectid = btrfs_ref_objectid(leaf, ref);
  4169. if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
  4170. if (first_time) {
  4171. level = (int)ref_objectid;
  4172. BUG_ON(level >= BTRFS_MAX_LEVEL);
  4173. ref_path->lowest_level = level;
  4174. ref_path->current_level = level;
  4175. ref_path->nodes[level] = bytenr;
  4176. } else {
  4177. WARN_ON(ref_objectid != level);
  4178. }
  4179. } else {
  4180. WARN_ON(level != -1);
  4181. }
  4182. first_time = 0;
  4183. if (ref_path->lowest_level == level) {
  4184. ref_path->owner_objectid = ref_objectid;
  4185. ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
  4186. }
4187. /*
4188. * the block is a tree root or the block isn't in a
4189. * reference counted tree.
4190. */
  4191. if (found_key.objectid == found_key.offset ||
  4192. is_cowonly_root(btrfs_ref_root(leaf, ref))) {
  4193. ref_path->root_objectid = btrfs_ref_root(leaf, ref);
  4194. ref_path->root_generation =
  4195. btrfs_ref_generation(leaf, ref);
  4196. if (level < 0) {
  4197. /* special reference from the tree log */
  4198. ref_path->nodes[0] = found_key.offset;
  4199. ref_path->current_level = 0;
  4200. }
  4201. ret = 0;
  4202. goto out;
  4203. }
  4204. level++;
  4205. BUG_ON(ref_path->nodes[level] != 0);
  4206. ref_path->nodes[level] = found_key.offset;
  4207. ref_path->current_level = level;
  4208. /*
  4209. * the reference was created in the running transaction,
  4210. * no need to continue walking up.
  4211. */
  4212. if (btrfs_ref_generation(leaf, ref) == trans->transid) {
  4213. ref_path->root_objectid = btrfs_ref_root(leaf, ref);
  4214. ref_path->root_generation =
  4215. btrfs_ref_generation(leaf, ref);
  4216. ret = 0;
  4217. goto out;
  4218. }
  4219. btrfs_release_path(extent_root, path);
  4220. cond_resched();
  4221. }
  4222. /* reached max tree level, but no tree root found. */
  4223. BUG();
  4224. out:
  4225. btrfs_free_path(path);
  4226. return ret;
  4227. }
  4228. static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
  4229. struct btrfs_root *extent_root,
  4230. struct btrfs_ref_path *ref_path,
  4231. u64 extent_start)
  4232. {
  4233. memset(ref_path, 0, sizeof(*ref_path));
  4234. ref_path->extent_start = extent_start;
  4235. return __next_ref_path(trans, extent_root, ref_path, 1);
  4236. }
  4237. static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
  4238. struct btrfs_root *extent_root,
  4239. struct btrfs_ref_path *ref_path)
  4240. {
  4241. return __next_ref_path(trans, extent_root, ref_path, 0);
  4242. }
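/*
* look up the file extents in the reloc inode that cover the extent
* described by extent_key and return their new disk locations.
* 'offset' is the start of the block group being relocated, since
* data in the reloc inode is indexed by (bytenr - block group start).
*/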
  4243. static noinline int get_new_locations(struct inode *reloc_inode,
  4244. struct btrfs_key *extent_key,
  4245. u64 offset, int no_fragment,
  4246. struct disk_extent **extents,
  4247. int *nr_extents)
  4248. {
  4249. struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
  4250. struct btrfs_path *path;
  4251. struct btrfs_file_extent_item *fi;
  4252. struct extent_buffer *leaf;
  4253. struct disk_extent *exts = *extents;
  4254. struct btrfs_key found_key;
  4255. u64 cur_pos;
  4256. u64 last_byte;
  4257. u32 nritems;
  4258. int nr = 0;
  4259. int max = *nr_extents;
  4260. int ret;
  4261. WARN_ON(!no_fragment && *extents);
  4262. if (!exts) {
  4263. max = 1;
  4264. exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
  4265. if (!exts)
  4266. return -ENOMEM;
  4267. }
  4268. path = btrfs_alloc_path();
  4269. BUG_ON(!path);
  4270. cur_pos = extent_key->objectid - offset;
  4271. last_byte = extent_key->objectid + extent_key->offset;
  4272. ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
  4273. cur_pos, 0);
  4274. if (ret < 0)
  4275. goto out;
  4276. if (ret > 0) {
  4277. ret = -ENOENT;
  4278. goto out;
  4279. }
  4280. while (1) {
  4281. leaf = path->nodes[0];
  4282. nritems = btrfs_header_nritems(leaf);
  4283. if (path->slots[0] >= nritems) {
  4284. ret = btrfs_next_leaf(root, path);
  4285. if (ret < 0)
  4286. goto out;
  4287. if (ret > 0)
  4288. break;
  4289. leaf = path->nodes[0];
  4290. }
  4291. btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
  4292. if (found_key.offset != cur_pos ||
  4293. found_key.type != BTRFS_EXTENT_DATA_KEY ||
  4294. found_key.objectid != reloc_inode->i_ino)
  4295. break;
  4296. fi = btrfs_item_ptr(leaf, path->slots[0],
  4297. struct btrfs_file_extent_item);
  4298. if (btrfs_file_extent_type(leaf, fi) !=
  4299. BTRFS_FILE_EXTENT_REG ||
  4300. btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
  4301. break;
4302. if (nr == max) {
4303. struct disk_extent *old = exts;
4304. max *= 2;
4305. exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
if (!exts) {
/* keep the old array so the cleanup at 'out' frees it */
exts = old;
ret = -ENOMEM;
goto out;
}
4306. memcpy(exts, old, sizeof(*exts) * nr);
4307. if (old != *extents)
4308. kfree(old);
4309. }
  4310. exts[nr].disk_bytenr =
  4311. btrfs_file_extent_disk_bytenr(leaf, fi);
  4312. exts[nr].disk_num_bytes =
  4313. btrfs_file_extent_disk_num_bytes(leaf, fi);
  4314. exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
  4315. exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
  4316. exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
  4317. exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
  4318. exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
  4319. exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
  4320. fi);
  4321. BUG_ON(exts[nr].offset > 0);
  4322. BUG_ON(exts[nr].compression || exts[nr].encryption);
  4323. BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
  4324. cur_pos += exts[nr].num_bytes;
  4325. nr++;
  4326. if (cur_pos + offset >= last_byte)
  4327. break;
  4328. if (no_fragment) {
  4329. ret = 1;
  4330. goto out;
  4331. }
  4332. path->slots[0]++;
  4333. }
  4334. BUG_ON(cur_pos + offset > last_byte);
  4335. if (cur_pos + offset < last_byte) {
  4336. ret = -ENOENT;
  4337. goto out;
  4338. }
  4339. ret = 0;
  4340. out:
  4341. btrfs_free_path(path);
  4342. if (ret) {
  4343. if (exts != *extents)
  4344. kfree(exts);
  4345. } else {
  4346. *extents = exts;
  4347. *nr_extents = nr;
  4348. }
  4349. return ret;
  4350. }
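/*
* rewrite the file extent items in 'root' that point at the extent
* being relocated (extent_key) so they reference its new location.
* the affected file ranges are locked and the extent reference counts
* are updated accordingly.
*/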
  4351. static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
  4352. struct btrfs_root *root,
  4353. struct btrfs_path *path,
  4354. struct btrfs_key *extent_key,
  4355. struct btrfs_key *leaf_key,
  4356. struct btrfs_ref_path *ref_path,
  4357. struct disk_extent *new_extents,
  4358. int nr_extents)
  4359. {
  4360. struct extent_buffer *leaf;
  4361. struct btrfs_file_extent_item *fi;
  4362. struct inode *inode = NULL;
  4363. struct btrfs_key key;
  4364. u64 lock_start = 0;
  4365. u64 lock_end = 0;
  4366. u64 num_bytes;
  4367. u64 ext_offset;
  4368. u64 search_end = (u64)-1;
  4369. u32 nritems;
  4370. int nr_scaned = 0;
  4371. int extent_locked = 0;
  4372. int extent_type;
  4373. int ret;
  4374. memcpy(&key, leaf_key, sizeof(key));
  4375. if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
  4376. if (key.objectid < ref_path->owner_objectid ||
  4377. (key.objectid == ref_path->owner_objectid &&
  4378. key.type < BTRFS_EXTENT_DATA_KEY)) {
  4379. key.objectid = ref_path->owner_objectid;
  4380. key.type = BTRFS_EXTENT_DATA_KEY;
  4381. key.offset = 0;
  4382. }
  4383. }
  4384. while (1) {
  4385. ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
  4386. if (ret < 0)
  4387. goto out;
  4388. leaf = path->nodes[0];
  4389. nritems = btrfs_header_nritems(leaf);
  4390. next:
  4391. if (extent_locked && ret > 0) {
  4392. /*
  4393. * the file extent item was modified by someone
  4394. * before the extent got locked.
  4395. */
  4396. unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
  4397. lock_end, GFP_NOFS);
  4398. extent_locked = 0;
  4399. }
  4400. if (path->slots[0] >= nritems) {
  4401. if (++nr_scaned > 2)
  4402. break;
  4403. BUG_ON(extent_locked);
  4404. ret = btrfs_next_leaf(root, path);
  4405. if (ret < 0)
  4406. goto out;
  4407. if (ret > 0)
  4408. break;
  4409. leaf = path->nodes[0];
  4410. nritems = btrfs_header_nritems(leaf);
  4411. }
  4412. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  4413. if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
  4414. if ((key.objectid > ref_path->owner_objectid) ||
  4415. (key.objectid == ref_path->owner_objectid &&
  4416. key.type > BTRFS_EXTENT_DATA_KEY) ||
  4417. key.offset >= search_end)
  4418. break;
  4419. }
  4420. if (inode && key.objectid != inode->i_ino) {
  4421. BUG_ON(extent_locked);
  4422. btrfs_release_path(root, path);
  4423. mutex_unlock(&inode->i_mutex);
  4424. iput(inode);
  4425. inode = NULL;
  4426. continue;
  4427. }
  4428. if (key.type != BTRFS_EXTENT_DATA_KEY) {
  4429. path->slots[0]++;
  4430. ret = 1;
  4431. goto next;
  4432. }
  4433. fi = btrfs_item_ptr(leaf, path->slots[0],
  4434. struct btrfs_file_extent_item);
  4435. extent_type = btrfs_file_extent_type(leaf, fi);
  4436. if ((extent_type != BTRFS_FILE_EXTENT_REG &&
  4437. extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
  4438. (btrfs_file_extent_disk_bytenr(leaf, fi) !=
  4439. extent_key->objectid)) {
  4440. path->slots[0]++;
  4441. ret = 1;
  4442. goto next;
  4443. }
  4444. num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
  4445. ext_offset = btrfs_file_extent_offset(leaf, fi);
  4446. if (search_end == (u64)-1) {
  4447. search_end = key.offset - ext_offset +
  4448. btrfs_file_extent_ram_bytes(leaf, fi);
  4449. }
  4450. if (!extent_locked) {
  4451. lock_start = key.offset;
  4452. lock_end = lock_start + num_bytes - 1;
  4453. } else {
  4454. if (lock_start > key.offset ||
  4455. lock_end + 1 < key.offset + num_bytes) {
  4456. unlock_extent(&BTRFS_I(inode)->io_tree,
  4457. lock_start, lock_end, GFP_NOFS);
  4458. extent_locked = 0;
  4459. }
  4460. }
  4461. if (!inode) {
  4462. btrfs_release_path(root, path);
  4463. inode = btrfs_iget_locked(root->fs_info->sb,
  4464. key.objectid, root);
  4465. if (inode->i_state & I_NEW) {
  4466. BTRFS_I(inode)->root = root;
  4467. BTRFS_I(inode)->location.objectid =
  4468. key.objectid;
  4469. BTRFS_I(inode)->location.type =
  4470. BTRFS_INODE_ITEM_KEY;
  4471. BTRFS_I(inode)->location.offset = 0;
  4472. btrfs_read_locked_inode(inode);
  4473. unlock_new_inode(inode);
  4474. }
4475. /*
4476. * some code calls btrfs_commit_transaction while
4477. * holding the i_mutex, so we can't use mutex_lock
4478. * here.
4479. */
  4480. if (is_bad_inode(inode) ||
  4481. !mutex_trylock(&inode->i_mutex)) {
  4482. iput(inode);
  4483. inode = NULL;
  4484. key.offset = (u64)-1;
  4485. goto skip;
  4486. }
  4487. }
  4488. if (!extent_locked) {
  4489. struct btrfs_ordered_extent *ordered;
  4490. btrfs_release_path(root, path);
  4491. lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
  4492. lock_end, GFP_NOFS);
  4493. ordered = btrfs_lookup_first_ordered_extent(inode,
  4494. lock_end);
  4495. if (ordered &&
  4496. ordered->file_offset <= lock_end &&
  4497. ordered->file_offset + ordered->len > lock_start) {
  4498. unlock_extent(&BTRFS_I(inode)->io_tree,
  4499. lock_start, lock_end, GFP_NOFS);
  4500. btrfs_start_ordered_extent(inode, ordered, 1);
  4501. btrfs_put_ordered_extent(ordered);
  4502. key.offset += num_bytes;
  4503. goto skip;
  4504. }
  4505. if (ordered)
  4506. btrfs_put_ordered_extent(ordered);
  4507. extent_locked = 1;
  4508. continue;
  4509. }
  4510. if (nr_extents == 1) {
  4511. /* update extent pointer in place */
  4512. btrfs_set_file_extent_disk_bytenr(leaf, fi,
  4513. new_extents[0].disk_bytenr);
  4514. btrfs_set_file_extent_disk_num_bytes(leaf, fi,
  4515. new_extents[0].disk_num_bytes);
  4516. btrfs_mark_buffer_dirty(leaf);
  4517. btrfs_drop_extent_cache(inode, key.offset,
  4518. key.offset + num_bytes - 1, 0);
  4519. ret = btrfs_inc_extent_ref(trans, root,
  4520. new_extents[0].disk_bytenr,
  4521. new_extents[0].disk_num_bytes,
  4522. leaf->start,
  4523. root->root_key.objectid,
  4524. trans->transid,
  4525. key.objectid);
  4526. BUG_ON(ret);
  4527. ret = btrfs_free_extent(trans, root,
  4528. extent_key->objectid,
  4529. extent_key->offset,
  4530. leaf->start,
  4531. btrfs_header_owner(leaf),
  4532. btrfs_header_generation(leaf),
  4533. key.objectid, 0);
  4534. BUG_ON(ret);
  4535. btrfs_release_path(root, path);
  4536. key.offset += num_bytes;
  4537. } else {
  4538. BUG_ON(1);
  4539. #if 0
  4540. u64 alloc_hint;
  4541. u64 extent_len;
  4542. int i;
4543. /*
4544. * drop the old extent pointer first, then insert the
4545. * new pointers one by one
4546. */
  4547. btrfs_release_path(root, path);
  4548. ret = btrfs_drop_extents(trans, root, inode, key.offset,
  4549. key.offset + num_bytes,
  4550. key.offset, &alloc_hint);
  4551. BUG_ON(ret);
  4552. for (i = 0; i < nr_extents; i++) {
  4553. if (ext_offset >= new_extents[i].num_bytes) {
  4554. ext_offset -= new_extents[i].num_bytes;
  4555. continue;
  4556. }
  4557. extent_len = min(new_extents[i].num_bytes -
  4558. ext_offset, num_bytes);
  4559. ret = btrfs_insert_empty_item(trans, root,
  4560. path, &key,
  4561. sizeof(*fi));
  4562. BUG_ON(ret);
  4563. leaf = path->nodes[0];
  4564. fi = btrfs_item_ptr(leaf, path->slots[0],
  4565. struct btrfs_file_extent_item);
  4566. btrfs_set_file_extent_generation(leaf, fi,
  4567. trans->transid);
  4568. btrfs_set_file_extent_type(leaf, fi,
  4569. BTRFS_FILE_EXTENT_REG);
  4570. btrfs_set_file_extent_disk_bytenr(leaf, fi,
  4571. new_extents[i].disk_bytenr);
  4572. btrfs_set_file_extent_disk_num_bytes(leaf, fi,
  4573. new_extents[i].disk_num_bytes);
  4574. btrfs_set_file_extent_ram_bytes(leaf, fi,
  4575. new_extents[i].ram_bytes);
  4576. btrfs_set_file_extent_compression(leaf, fi,
  4577. new_extents[i].compression);
  4578. btrfs_set_file_extent_encryption(leaf, fi,
  4579. new_extents[i].encryption);
  4580. btrfs_set_file_extent_other_encoding(leaf, fi,
  4581. new_extents[i].other_encoding);
  4582. btrfs_set_file_extent_num_bytes(leaf, fi,
  4583. extent_len);
  4584. ext_offset += new_extents[i].offset;
  4585. btrfs_set_file_extent_offset(leaf, fi,
  4586. ext_offset);
  4587. btrfs_mark_buffer_dirty(leaf);
  4588. btrfs_drop_extent_cache(inode, key.offset,
  4589. key.offset + extent_len - 1, 0);
  4590. ret = btrfs_inc_extent_ref(trans, root,
  4591. new_extents[i].disk_bytenr,
  4592. new_extents[i].disk_num_bytes,
  4593. leaf->start,
  4594. root->root_key.objectid,
  4595. trans->transid, key.objectid);
  4596. BUG_ON(ret);
  4597. btrfs_release_path(root, path);
  4598. inode_add_bytes(inode, extent_len);
  4599. ext_offset = 0;
  4600. num_bytes -= extent_len;
  4601. key.offset += extent_len;
  4602. if (num_bytes == 0)
  4603. break;
  4604. }
  4605. BUG_ON(i >= nr_extents);
  4606. #endif
  4607. }
  4608. if (extent_locked) {
  4609. unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
  4610. lock_end, GFP_NOFS);
  4611. extent_locked = 0;
  4612. }
  4613. skip:
  4614. if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
  4615. key.offset >= search_end)
  4616. break;
  4617. cond_resched();
  4618. }
  4619. ret = 0;
  4620. out:
  4621. btrfs_release_path(root, path);
  4622. if (inode) {
  4623. mutex_unlock(&inode->i_mutex);
  4624. if (extent_locked) {
  4625. unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
  4626. lock_end, GFP_NOFS);
  4627. }
  4628. iput(inode);
  4629. }
  4630. return ret;
  4631. }
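/*
* when a leaf of a reloc tree is COWed, copy the cached leaf ref of
* the original leaf (orig_start) to the new leaf so the data extents
* it references can still be found during relocation.
*/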
  4632. int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
  4633. struct btrfs_root *root,
  4634. struct extent_buffer *buf, u64 orig_start)
  4635. {
  4636. int level;
  4637. int ret;
  4638. BUG_ON(btrfs_header_generation(buf) != trans->transid);
  4639. BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
  4640. level = btrfs_header_level(buf);
  4641. if (level == 0) {
  4642. struct btrfs_leaf_ref *ref;
  4643. struct btrfs_leaf_ref *orig_ref;
  4644. orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
  4645. if (!orig_ref)
  4646. return -ENOENT;
  4647. ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
  4648. if (!ref) {
  4649. btrfs_free_leaf_ref(root, orig_ref);
  4650. return -ENOMEM;
  4651. }
  4652. ref->nritems = orig_ref->nritems;
  4653. memcpy(ref->extents, orig_ref->extents,
  4654. sizeof(ref->extents[0]) * ref->nritems);
  4655. btrfs_free_leaf_ref(root, orig_ref);
  4656. ref->root_gen = trans->transid;
  4657. ref->bytenr = buf->start;
  4658. ref->owner = btrfs_header_owner(buf);
  4659. ref->generation = btrfs_header_generation(buf);
  4660. ret = btrfs_add_leaf_ref(root, ref, 0);
  4661. WARN_ON(ret);
  4662. btrfs_free_leaf_ref(root, ref);
  4663. }
  4664. return 0;
  4665. }
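/*
* drop the cached extent mappings in target_root's inodes for every
* file extent referenced by 'leaf', so readers pick up the relocated
* copies.
*/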
  4666. static noinline int invalidate_extent_cache(struct btrfs_root *root,
  4667. struct extent_buffer *leaf,
  4668. struct btrfs_block_group_cache *group,
  4669. struct btrfs_root *target_root)
  4670. {
  4671. struct btrfs_key key;
  4672. struct inode *inode = NULL;
  4673. struct btrfs_file_extent_item *fi;
  4674. u64 num_bytes;
  4675. u64 skip_objectid = 0;
  4676. u32 nritems;
  4677. u32 i;
  4678. nritems = btrfs_header_nritems(leaf);
  4679. for (i = 0; i < nritems; i++) {
  4680. btrfs_item_key_to_cpu(leaf, &key, i);
  4681. if (key.objectid == skip_objectid ||
  4682. key.type != BTRFS_EXTENT_DATA_KEY)
  4683. continue;
  4684. fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
  4685. if (btrfs_file_extent_type(leaf, fi) ==
  4686. BTRFS_FILE_EXTENT_INLINE)
  4687. continue;
  4688. if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
  4689. continue;
  4690. if (!inode || inode->i_ino != key.objectid) {
  4691. iput(inode);
  4692. inode = btrfs_ilookup(target_root->fs_info->sb,
  4693. key.objectid, target_root, 1);
  4694. }
  4695. if (!inode) {
  4696. skip_objectid = key.objectid;
  4697. continue;
  4698. }
  4699. num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
  4700. lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
  4701. key.offset + num_bytes - 1, GFP_NOFS);
  4702. btrfs_drop_extent_cache(inode, key.offset,
  4703. key.offset + num_bytes - 1, 1);
  4704. unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
  4705. key.offset + num_bytes - 1, GFP_NOFS);
  4706. cond_resched();
  4707. }
  4708. iput(inode);
  4709. return 0;
  4710. }
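/*
* for each file extent in a reloc tree leaf that falls inside the
* block group being relocated, point it at the copy in the reloc
* inode and update the cached leaf ref and extent references.
*/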
  4711. static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
  4712. struct btrfs_root *root,
  4713. struct extent_buffer *leaf,
  4714. struct btrfs_block_group_cache *group,
  4715. struct inode *reloc_inode)
  4716. {
  4717. struct btrfs_key key;
  4718. struct btrfs_key extent_key;
  4719. struct btrfs_file_extent_item *fi;
  4720. struct btrfs_leaf_ref *ref;
  4721. struct disk_extent *new_extent;
  4722. u64 bytenr;
  4723. u64 num_bytes;
  4724. u32 nritems;
  4725. u32 i;
  4726. int ext_index;
  4727. int nr_extent;
  4728. int ret;
  4729. new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
  4730. BUG_ON(!new_extent);
  4731. ref = btrfs_lookup_leaf_ref(root, leaf->start);
  4732. BUG_ON(!ref);
  4733. ext_index = -1;
  4734. nritems = btrfs_header_nritems(leaf);
  4735. for (i = 0; i < nritems; i++) {
  4736. btrfs_item_key_to_cpu(leaf, &key, i);
  4737. if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
  4738. continue;
  4739. fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
  4740. if (btrfs_file_extent_type(leaf, fi) ==
  4741. BTRFS_FILE_EXTENT_INLINE)
  4742. continue;
  4743. bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
  4744. num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
  4745. if (bytenr == 0)
  4746. continue;
  4747. ext_index++;
  4748. if (bytenr >= group->key.objectid + group->key.offset ||
  4749. bytenr + num_bytes <= group->key.objectid)
  4750. continue;
  4751. extent_key.objectid = bytenr;
  4752. extent_key.offset = num_bytes;
  4753. extent_key.type = BTRFS_EXTENT_ITEM_KEY;
  4754. nr_extent = 1;
  4755. ret = get_new_locations(reloc_inode, &extent_key,
  4756. group->key.objectid, 1,
  4757. &new_extent, &nr_extent);
  4758. if (ret > 0)
  4759. continue;
  4760. BUG_ON(ret < 0);
  4761. BUG_ON(ref->extents[ext_index].bytenr != bytenr);
  4762. BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
  4763. ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
  4764. ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
  4765. btrfs_set_file_extent_disk_bytenr(leaf, fi,
  4766. new_extent->disk_bytenr);
  4767. btrfs_set_file_extent_disk_num_bytes(leaf, fi,
  4768. new_extent->disk_num_bytes);
  4769. btrfs_mark_buffer_dirty(leaf);
  4770. ret = btrfs_inc_extent_ref(trans, root,
  4771. new_extent->disk_bytenr,
  4772. new_extent->disk_num_bytes,
  4773. leaf->start,
  4774. root->root_key.objectid,
  4775. trans->transid, key.objectid);
  4776. BUG_ON(ret);
  4777. ret = btrfs_free_extent(trans, root,
  4778. bytenr, num_bytes, leaf->start,
  4779. btrfs_header_owner(leaf),
  4780. btrfs_header_generation(leaf),
  4781. key.objectid, 0);
  4782. BUG_ON(ret);
  4783. cond_resched();
  4784. }
  4785. kfree(new_extent);
  4786. BUG_ON(ext_index + 1 != ref->nritems);
  4787. btrfs_free_leaf_ref(root, ref);
  4788. return 0;
  4789. }
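/*
* detach the reloc tree from a subvol root and put it on the
* dead_reloc_roots list; its root item is updated so the tree can be
* dropped later by btrfs_drop_dead_reloc_roots().
*/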
  4790. int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
  4791. struct btrfs_root *root)
  4792. {
  4793. struct btrfs_root *reloc_root;
  4794. int ret;
  4795. if (root->reloc_root) {
  4796. reloc_root = root->reloc_root;
  4797. root->reloc_root = NULL;
  4798. list_add(&reloc_root->dead_list,
  4799. &root->fs_info->dead_reloc_roots);
  4800. btrfs_set_root_bytenr(&reloc_root->root_item,
  4801. reloc_root->node->start);
  4802. btrfs_set_root_level(&root->root_item,
  4803. btrfs_header_level(reloc_root->node));
  4804. memset(&reloc_root->root_item.drop_progress, 0,
  4805. sizeof(struct btrfs_disk_key));
  4806. reloc_root->root_item.drop_level = 0;
  4807. ret = btrfs_update_root(trans, root->fs_info->tree_root,
  4808. &reloc_root->root_key,
  4809. &reloc_root->root_item);
  4810. BUG_ON(ret);
  4811. }
  4812. return 0;
  4813. }
  4814. int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
  4815. {
  4816. struct btrfs_trans_handle *trans;
  4817. struct btrfs_root *reloc_root;
  4818. struct btrfs_root *prev_root = NULL;
  4819. struct list_head dead_roots;
  4820. int ret;
  4821. unsigned long nr;
  4822. INIT_LIST_HEAD(&dead_roots);
  4823. list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
  4824. while (!list_empty(&dead_roots)) {
  4825. reloc_root = list_entry(dead_roots.prev,
  4826. struct btrfs_root, dead_list);
  4827. list_del_init(&reloc_root->dead_list);
  4828. BUG_ON(reloc_root->commit_root != NULL);
  4829. while (1) {
  4830. trans = btrfs_join_transaction(root, 1);
  4831. BUG_ON(!trans);
  4832. mutex_lock(&root->fs_info->drop_mutex);
  4833. ret = btrfs_drop_snapshot(trans, reloc_root);
  4834. if (ret != -EAGAIN)
  4835. break;
  4836. mutex_unlock(&root->fs_info->drop_mutex);
  4837. nr = trans->blocks_used;
  4838. ret = btrfs_end_transaction(trans, root);
  4839. BUG_ON(ret);
  4840. btrfs_btree_balance_dirty(root, nr);
  4841. }
  4842. free_extent_buffer(reloc_root->node);
  4843. ret = btrfs_del_root(trans, root->fs_info->tree_root,
  4844. &reloc_root->root_key);
  4845. BUG_ON(ret);
  4846. mutex_unlock(&root->fs_info->drop_mutex);
  4847. nr = trans->blocks_used;
  4848. ret = btrfs_end_transaction(trans, root);
  4849. BUG_ON(ret);
  4850. btrfs_btree_balance_dirty(root, nr);
  4851. kfree(prev_root);
  4852. prev_root = reloc_root;
  4853. }
  4854. if (prev_root) {
  4855. btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
  4856. kfree(prev_root);
  4857. }
  4858. return 0;
  4859. }
  4860. int btrfs_add_dead_reloc_root(struct btrfs_root *root)
  4861. {
  4862. list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
  4863. return 0;
  4864. }
  4865. int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
  4866. {
  4867. struct btrfs_root *reloc_root;
  4868. struct btrfs_trans_handle *trans;
  4869. struct btrfs_key location;
  4870. int found;
  4871. int ret;
  4872. mutex_lock(&root->fs_info->tree_reloc_mutex);
  4873. ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
  4874. BUG_ON(ret);
  4875. found = !list_empty(&root->fs_info->dead_reloc_roots);
  4876. mutex_unlock(&root->fs_info->tree_reloc_mutex);
  4877. if (found) {
  4878. trans = btrfs_start_transaction(root, 1);
  4879. BUG_ON(!trans);
  4880. ret = btrfs_commit_transaction(trans, root);
  4881. BUG_ON(ret);
  4882. }
  4883. location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
  4884. location.offset = (u64)-1;
  4885. location.type = BTRFS_ROOT_ITEM_KEY;
  4886. reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
  4887. BUG_ON(!reloc_root);
  4888. btrfs_orphan_cleanup(reloc_root);
  4889. return 0;
  4890. }
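/*
* create the reloc tree for a subvol: snapshot the subvol's committed
* root under BTRFS_TREE_RELOC_OBJECTID, insert its root item and hook
* the new tree up as root->reloc_root.
*/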
  4891. static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
  4892. struct btrfs_root *root)
  4893. {
  4894. struct btrfs_root *reloc_root;
  4895. struct extent_buffer *eb;
  4896. struct btrfs_root_item *root_item;
  4897. struct btrfs_key root_key;
  4898. int ret;
  4899. BUG_ON(!root->ref_cows);
  4900. if (root->reloc_root)
  4901. return 0;
  4902. root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
  4903. BUG_ON(!root_item);
  4904. ret = btrfs_copy_root(trans, root, root->commit_root,
  4905. &eb, BTRFS_TREE_RELOC_OBJECTID);
  4906. BUG_ON(ret);
  4907. root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
  4908. root_key.offset = root->root_key.objectid;
  4909. root_key.type = BTRFS_ROOT_ITEM_KEY;
4910. memcpy(root_item, &root->root_item, sizeof(*root_item));
  4911. btrfs_set_root_refs(root_item, 0);
  4912. btrfs_set_root_bytenr(root_item, eb->start);
  4913. btrfs_set_root_level(root_item, btrfs_header_level(eb));
  4914. btrfs_set_root_generation(root_item, trans->transid);
  4915. btrfs_tree_unlock(eb);
  4916. free_extent_buffer(eb);
  4917. ret = btrfs_insert_root(trans, root->fs_info->tree_root,
  4918. &root_key, root_item);
  4919. BUG_ON(ret);
  4920. kfree(root_item);
  4921. reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
  4922. &root_key);
  4923. BUG_ON(!reloc_root);
  4924. reloc_root->last_trans = trans->transid;
  4925. reloc_root->commit_root = NULL;
  4926. reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
  4927. root->reloc_root = reloc_root;
  4928. return 0;
  4929. }
4930. /*
4931. * Core function of space balance.
4932. *
4933. * The idea is to use reloc trees to relocate tree blocks in reference
4934. * counted roots. There is one reloc tree for each subvol, and all
4935. * reloc trees share the same root key objectid. Reloc trees are
4936. * snapshots of the latest committed roots of subvols (root->commit_root).
4937. *
4938. * To relocate a tree block referenced by a subvol, there are two steps:
4939. * COW the block through the subvol's reloc tree, then update the block
4940. * pointer in the subvol to point to the new block. Since all reloc trees
4941. * share the same root key objectid, special handling for tree blocks
4942. * owned by them is easy. Once a tree block has been COWed in one reloc
4943. * tree, we can use the resulting new block directly when the same block
4944. * needs to be COWed again through another reloc tree. In this way,
4945. * relocated tree blocks are shared between reloc trees, so they are
4946. * also shared between subvols.
4947. */
  4948. static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
  4949. struct btrfs_root *root,
  4950. struct btrfs_path *path,
  4951. struct btrfs_key *first_key,
  4952. struct btrfs_ref_path *ref_path,
  4953. struct btrfs_block_group_cache *group,
  4954. struct inode *reloc_inode)
  4955. {
  4956. struct btrfs_root *reloc_root;
  4957. struct extent_buffer *eb = NULL;
  4958. struct btrfs_key *keys;
  4959. u64 *nodes;
  4960. int level;
  4961. int shared_level;
  4962. int lowest_level = 0;
  4963. int ret;
  4964. if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
  4965. lowest_level = ref_path->owner_objectid;
  4966. if (!root->ref_cows) {
  4967. path->lowest_level = lowest_level;
  4968. ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
  4969. BUG_ON(ret < 0);
  4970. path->lowest_level = 0;
  4971. btrfs_release_path(root, path);
  4972. return 0;
  4973. }
  4974. mutex_lock(&root->fs_info->tree_reloc_mutex);
  4975. ret = init_reloc_tree(trans, root);
  4976. BUG_ON(ret);
  4977. reloc_root = root->reloc_root;
  4978. shared_level = ref_path->shared_level;
  4979. ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
  4980. keys = ref_path->node_keys;
  4981. nodes = ref_path->new_nodes;
  4982. memset(&keys[shared_level + 1], 0,
  4983. sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
  4984. memset(&nodes[shared_level + 1], 0,
  4985. sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
  4986. if (nodes[lowest_level] == 0) {
  4987. path->lowest_level = lowest_level;
  4988. ret = btrfs_search_slot(trans, reloc_root, first_key, path,
  4989. 0, 1);
  4990. BUG_ON(ret);
  4991. for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
  4992. eb = path->nodes[level];
  4993. if (!eb || eb == reloc_root->node)
  4994. break;
  4995. nodes[level] = eb->start;
  4996. if (level == 0)
  4997. btrfs_item_key_to_cpu(eb, &keys[level], 0);
  4998. else
  4999. btrfs_node_key_to_cpu(eb, &keys[level], 0);
  5000. }
  5001. if (nodes[0] &&
  5002. ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
  5003. eb = path->nodes[0];
  5004. ret = replace_extents_in_leaf(trans, reloc_root, eb,
  5005. group, reloc_inode);
  5006. BUG_ON(ret);
  5007. }
  5008. btrfs_release_path(reloc_root, path);
  5009. } else {
  5010. ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
  5011. lowest_level);
  5012. BUG_ON(ret);
  5013. }
  5014. /*
  5015. * replace tree blocks in the fs tree with tree blocks in
  5016. * the reloc tree.
  5017. */
  5018. ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
  5019. BUG_ON(ret < 0);
  5020. if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
  5021. ret = btrfs_search_slot(trans, reloc_root, first_key, path,
  5022. 0, 0);
  5023. BUG_ON(ret);
  5024. extent_buffer_get(path->nodes[0]);
  5025. eb = path->nodes[0];
  5026. btrfs_release_path(reloc_root, path);
  5027. ret = invalidate_extent_cache(reloc_root, eb, group, root);
  5028. BUG_ON(ret);
  5029. free_extent_buffer(eb);
  5030. }
  5031. mutex_unlock(&root->fs_info->tree_reloc_mutex);
  5032. path->lowest_level = 0;
  5033. return 0;
  5034. }
  5035. static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
  5036. struct btrfs_root *root,
  5037. struct btrfs_path *path,
  5038. struct btrfs_key *first_key,
  5039. struct btrfs_ref_path *ref_path)
  5040. {
  5041. int ret;
  5042. ret = relocate_one_path(trans, root, path, first_key,
  5043. ref_path, NULL, NULL);
  5044. BUG_ON(ret);
  5045. if (root == root->fs_info->extent_root)
  5046. btrfs_extent_post_op(trans, root);
  5047. return 0;
  5048. }
  5049. static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
  5050. struct btrfs_root *extent_root,
  5051. struct btrfs_path *path,
  5052. struct btrfs_key *extent_key)
  5053. {
  5054. int ret;
  5055. ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
  5056. if (ret)
  5057. goto out;
  5058. ret = btrfs_del_item(trans, extent_root, path);
  5059. out:
  5060. btrfs_release_path(extent_root, path);
  5061. return ret;
  5062. }
  5063. static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
  5064. struct btrfs_ref_path *ref_path)
  5065. {
  5066. struct btrfs_key root_key;
  5067. root_key.objectid = ref_path->root_objectid;
  5068. root_key.type = BTRFS_ROOT_ITEM_KEY;
  5069. if (is_cowonly_root(ref_path->root_objectid))
  5070. root_key.offset = 0;
  5071. else
  5072. root_key.offset = (u64)-1;
  5073. return btrfs_read_fs_root_no_name(fs_info, &root_key);
  5074. }
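/*
* relocate all references to a single extent. for each reference path,
* data extents are either copied into the reloc inode (pass 0) or have
* their references updated (later passes), while tree blocks are
* relocated through the reloc trees.
*/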
  5075. static noinline int relocate_one_extent(struct btrfs_root *extent_root,
  5076. struct btrfs_path *path,
  5077. struct btrfs_key *extent_key,
  5078. struct btrfs_block_group_cache *group,
  5079. struct inode *reloc_inode, int pass)
  5080. {
  5081. struct btrfs_trans_handle *trans;
  5082. struct btrfs_root *found_root;
  5083. struct btrfs_ref_path *ref_path = NULL;
  5084. struct disk_extent *new_extents = NULL;
  5085. int nr_extents = 0;
  5086. int loops;
  5087. int ret;
  5088. int level;
  5089. struct btrfs_key first_key;
  5090. u64 prev_block = 0;
  5091. trans = btrfs_start_transaction(extent_root, 1);
  5092. BUG_ON(!trans);
  5093. if (extent_key->objectid == 0) {
  5094. ret = del_extent_zero(trans, extent_root, path, extent_key);
  5095. goto out;
  5096. }
  5097. ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
  5098. if (!ref_path) {
  5099. ret = -ENOMEM;
  5100. goto out;
  5101. }
  5102. for (loops = 0; ; loops++) {
  5103. if (loops == 0) {
  5104. ret = btrfs_first_ref_path(trans, extent_root, ref_path,
  5105. extent_key->objectid);
  5106. } else {
  5107. ret = btrfs_next_ref_path(trans, extent_root, ref_path);
  5108. }
  5109. if (ret < 0)
  5110. goto out;
  5111. if (ret > 0)
  5112. break;
  5113. if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
  5114. ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
  5115. continue;
  5116. found_root = read_ref_root(extent_root->fs_info, ref_path);
  5117. BUG_ON(!found_root);
5118. /*
5119. * for a reference counted tree, only process reference paths
5120. * rooted at the latest committed root.
5121. */
  5122. if (found_root->ref_cows &&
  5123. ref_path->root_generation != found_root->root_key.offset)
  5124. continue;
  5125. if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
  5126. if (pass == 0) {
  5127. /*
  5128. * copy data extents to new locations
  5129. */
  5130. u64 group_start = group->key.objectid;
  5131. ret = relocate_data_extent(reloc_inode,
  5132. extent_key,
  5133. group_start);
  5134. if (ret < 0)
  5135. goto out;
  5136. break;
  5137. }
  5138. level = 0;
  5139. } else {
  5140. level = ref_path->owner_objectid;
  5141. }
  5142. if (prev_block != ref_path->nodes[level]) {
  5143. struct extent_buffer *eb;
  5144. u64 block_start = ref_path->nodes[level];
  5145. u64 block_size = btrfs_level_size(found_root, level);
  5146. eb = read_tree_block(found_root, block_start,
  5147. block_size, 0);
  5148. btrfs_tree_lock(eb);
  5149. BUG_ON(level != btrfs_header_level(eb));
  5150. if (level == 0)
  5151. btrfs_item_key_to_cpu(eb, &first_key, 0);
  5152. else
  5153. btrfs_node_key_to_cpu(eb, &first_key, 0);
  5154. btrfs_tree_unlock(eb);
  5155. free_extent_buffer(eb);
  5156. prev_block = block_start;
  5157. }
  5158. mutex_lock(&extent_root->fs_info->trans_mutex);
  5159. btrfs_record_root_in_trans(found_root);
  5160. mutex_unlock(&extent_root->fs_info->trans_mutex);
  5161. if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
  5162. /*
  5163. * try to update data extent references while
  5164. * keeping metadata shared between snapshots.
  5165. */
  5166. if (pass == 1) {
  5167. ret = relocate_one_path(trans, found_root,
  5168. path, &first_key, ref_path,
  5169. group, reloc_inode);
  5170. if (ret < 0)
  5171. goto out;
  5172. continue;
  5173. }
5174. /*
5175. * use the fallback method to process the remaining
5176. * references.
5177. */
  5178. if (!new_extents) {
  5179. u64 group_start = group->key.objectid;
  5180. new_extents = kmalloc(sizeof(*new_extents),
  5181. GFP_NOFS);
  5182. nr_extents = 1;
  5183. ret = get_new_locations(reloc_inode,
  5184. extent_key,
  5185. group_start, 1,
  5186. &new_extents,
  5187. &nr_extents);
  5188. if (ret)
  5189. goto out;
  5190. }
  5191. ret = replace_one_extent(trans, found_root,
  5192. path, extent_key,
  5193. &first_key, ref_path,
  5194. new_extents, nr_extents);
  5195. } else {
  5196. ret = relocate_tree_block(trans, found_root, path,
  5197. &first_key, ref_path);
  5198. }
  5199. if (ret < 0)
  5200. goto out;
  5201. }
  5202. ret = 0;
  5203. out:
  5204. btrfs_end_transaction(trans, extent_root);
  5205. kfree(new_extents);
  5206. kfree(ref_path);
  5207. return ret;
  5208. }
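/*
* pick the raid profile used for chunks allocated while shrinking:
* with a single rw device, raid0 becomes single and raid1/raid10
* become dup; otherwise dup becomes raid1 and single becomes raid0.
*/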
  5209. static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
  5210. {
  5211. u64 num_devices;
  5212. u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
  5213. BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
  5214. num_devices = root->fs_info->fs_devices->rw_devices;
  5215. if (num_devices == 1) {
  5216. stripped |= BTRFS_BLOCK_GROUP_DUP;
  5217. stripped = flags & ~stripped;
  5218. /* turn raid0 into single device chunks */
  5219. if (flags & BTRFS_BLOCK_GROUP_RAID0)
  5220. return stripped;
  5221. /* turn mirroring into duplication */
  5222. if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
  5223. BTRFS_BLOCK_GROUP_RAID10))
  5224. return stripped | BTRFS_BLOCK_GROUP_DUP;
  5225. return flags;
  5226. } else {
  5227. /* they already had raid on here, just return */
  5228. if (flags & stripped)
  5229. return flags;
  5230. stripped |= BTRFS_BLOCK_GROUP_DUP;
  5231. stripped = flags & ~stripped;
  5232. /* switch duplicated blocks with raid1 */
  5233. if (flags & BTRFS_BLOCK_GROUP_DUP)
  5234. return stripped | BTRFS_BLOCK_GROUP_RAID1;
  5235. /* turn single device chunks into raid0 */
  5236. return stripped | BTRFS_BLOCK_GROUP_RAID0;
  5237. }
  5238. return flags;
  5239. }
  5240. static int __alloc_chunk_for_shrink(struct btrfs_root *root,
  5241. struct btrfs_block_group_cache *shrink_block_group,
  5242. int force)
  5243. {
  5244. struct btrfs_trans_handle *trans;
  5245. u64 new_alloc_flags;
  5246. u64 calc;
  5247. spin_lock(&shrink_block_group->lock);
  5248. if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
  5249. spin_unlock(&shrink_block_group->lock);
  5250. trans = btrfs_start_transaction(root, 1);
  5251. spin_lock(&shrink_block_group->lock);
  5252. new_alloc_flags = update_block_group_flags(root,
  5253. shrink_block_group->flags);
  5254. if (new_alloc_flags != shrink_block_group->flags) {
  5255. calc =
  5256. btrfs_block_group_used(&shrink_block_group->item);
  5257. } else {
  5258. calc = shrink_block_group->key.offset;
  5259. }
  5260. spin_unlock(&shrink_block_group->lock);
  5261. do_chunk_alloc(trans, root->fs_info->extent_root,
  5262. calc + 2 * 1024 * 1024, new_alloc_flags, force);
  5263. btrfs_end_transaction(trans, root);
  5264. } else
  5265. spin_unlock(&shrink_block_group->lock);
  5266. return 0;
  5267. }
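/*
* insert a bare inode item for the reloc inode: a regular file of the
* given size, flagged nocompress.
*/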
  5268. static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
  5269. struct btrfs_root *root,
  5270. u64 objectid, u64 size)
  5271. {
  5272. struct btrfs_path *path;
  5273. struct btrfs_inode_item *item;
  5274. struct extent_buffer *leaf;
  5275. int ret;
  5276. path = btrfs_alloc_path();
  5277. if (!path)
  5278. return -ENOMEM;
  5279. ret = btrfs_insert_empty_inode(trans, root, path, objectid);
  5280. if (ret)
  5281. goto out;
  5282. leaf = path->nodes[0];
  5283. item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
  5284. memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
  5285. btrfs_set_inode_generation(leaf, item, 1);
  5286. btrfs_set_inode_size(leaf, item, size);
  5287. btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
  5288. btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
  5289. btrfs_mark_buffer_dirty(leaf);
  5290. btrfs_release_path(root, path);
  5291. out:
  5292. btrfs_free_path(path);
  5293. return ret;
  5294. }
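/*
* create an inode in the data reloc tree sized to cover the whole
* block group. relocated data is written into it at file offset
* (bytenr - block group start), and the inode is put on the orphan
* list so it is cleaned up when relocation finishes.
*/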
  5295. static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
  5296. struct btrfs_block_group_cache *group)
  5297. {
  5298. struct inode *inode = NULL;
  5299. struct btrfs_trans_handle *trans;
  5300. struct btrfs_root *root;
  5301. struct btrfs_key root_key;
  5302. u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
  5303. int err = 0;
  5304. root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
  5305. root_key.type = BTRFS_ROOT_ITEM_KEY;
  5306. root_key.offset = (u64)-1;
  5307. root = btrfs_read_fs_root_no_name(fs_info, &root_key);
  5308. if (IS_ERR(root))
  5309. return ERR_CAST(root);
  5310. trans = btrfs_start_transaction(root, 1);
  5311. BUG_ON(!trans);
  5312. err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
  5313. if (err)
  5314. goto out;
  5315. err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
  5316. BUG_ON(err);
  5317. err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
  5318. group->key.offset, 0, group->key.offset,
  5319. 0, 0, 0);
  5320. BUG_ON(err);
  5321. inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
  5322. if (inode->i_state & I_NEW) {
  5323. BTRFS_I(inode)->root = root;
  5324. BTRFS_I(inode)->location.objectid = objectid;
  5325. BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
  5326. BTRFS_I(inode)->location.offset = 0;
  5327. btrfs_read_locked_inode(inode);
  5328. unlock_new_inode(inode);
  5329. BUG_ON(is_bad_inode(inode));
  5330. } else {
  5331. BUG_ON(1);
  5332. }
  5333. BTRFS_I(inode)->index_cnt = group->key.objectid;
  5334. err = btrfs_orphan_add(trans, inode);
  5335. out:
  5336. btrfs_end_transaction(trans, root);
  5337. if (err) {
  5338. if (inode)
  5339. iput(inode);
  5340. inode = ERR_PTR(err);
  5341. }
  5342. return inode;
  5343. }
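/*
* copy the checksums of the original data extent to the new location
* recorded in the ordered extent, so the relocated copy keeps valid
* csums.
*/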
  5344. int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
  5345. {
  5346. struct btrfs_ordered_sum *sums;
  5347. struct btrfs_sector_sum *sector_sum;
  5348. struct btrfs_ordered_extent *ordered;
  5349. struct btrfs_root *root = BTRFS_I(inode)->root;
  5350. struct list_head list;
  5351. size_t offset;
  5352. int ret;
  5353. u64 disk_bytenr;
  5354. INIT_LIST_HEAD(&list);
  5355. ordered = btrfs_lookup_ordered_extent(inode, file_pos);
  5356. BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
  5357. disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
  5358. ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
  5359. disk_bytenr + len - 1, &list);
  5360. while (!list_empty(&list)) {
  5361. sums = list_entry(list.next, struct btrfs_ordered_sum, list);
  5362. list_del_init(&sums->list);
  5363. sector_sum = sums->sums;
  5364. sums->bytenr = ordered->start;
  5365. offset = 0;
  5366. while (offset < sums->len) {
  5367. sector_sum->bytenr += ordered->start - disk_bytenr;
  5368. sector_sum++;
  5369. offset += root->sectorsize;
  5370. }
  5371. btrfs_add_ordered_sum(inode, ordered, sums);
  5372. }
  5373. btrfs_put_ordered_extent(ordered);
  5374. return 0;
  5375. }
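/*
* relocate everything in the block group starting at group_start: mark
* the group read-only, create a reloc inode, then repeatedly scan the
* extent tree and relocate each extent until the group is empty.
*/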
  5376. int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
  5377. {
  5378. struct btrfs_trans_handle *trans;
  5379. struct btrfs_path *path;
  5380. struct btrfs_fs_info *info = root->fs_info;
  5381. struct extent_buffer *leaf;
  5382. struct inode *reloc_inode;
  5383. struct btrfs_block_group_cache *block_group;
  5384. struct btrfs_key key;
  5385. u64 skipped;
  5386. u64 cur_byte;
  5387. u64 total_found;
  5388. u32 nritems;
  5389. int ret;
  5390. int progress;
  5391. int pass = 0;
  5392. root = root->fs_info->extent_root;
  5393. block_group = btrfs_lookup_block_group(info, group_start);
  5394. BUG_ON(!block_group);
  5395. printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
  5396. (unsigned long long)block_group->key.objectid,
  5397. (unsigned long long)block_group->flags);
  5398. path = btrfs_alloc_path();
  5399. BUG_ON(!path);
  5400. reloc_inode = create_reloc_inode(info, block_group);
  5401. BUG_ON(IS_ERR(reloc_inode));
  5402. __alloc_chunk_for_shrink(root, block_group, 1);
  5403. set_block_group_readonly(block_group);
  5404. btrfs_start_delalloc_inodes(info->tree_root);
  5405. btrfs_wait_ordered_extents(info->tree_root, 0);
  5406. again:
  5407. skipped = 0;
  5408. total_found = 0;
  5409. progress = 0;
  5410. key.objectid = block_group->key.objectid;
  5411. key.offset = 0;
  5412. key.type = 0;
  5413. cur_byte = key.objectid;
  5414. trans = btrfs_start_transaction(info->tree_root, 1);
  5415. btrfs_commit_transaction(trans, info->tree_root);
  5416. mutex_lock(&root->fs_info->cleaner_mutex);
  5417. btrfs_clean_old_snapshots(info->tree_root);
  5418. btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
  5419. mutex_unlock(&root->fs_info->cleaner_mutex);
  5420. while (1) {
  5421. ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
  5422. if (ret < 0)
  5423. goto out;
  5424. next:
  5425. leaf = path->nodes[0];
  5426. nritems = btrfs_header_nritems(leaf);
  5427. if (path->slots[0] >= nritems) {
  5428. ret = btrfs_next_leaf(root, path);
  5429. if (ret < 0)
  5430. goto out;
  5431. if (ret == 1) {
  5432. ret = 0;
  5433. break;
  5434. }
  5435. leaf = path->nodes[0];
  5436. nritems = btrfs_header_nritems(leaf);
  5437. }
  5438. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  5439. if (key.objectid >= block_group->key.objectid +
  5440. block_group->key.offset)
  5441. break;
  5442. if (progress && need_resched()) {
  5443. btrfs_release_path(root, path);
  5444. cond_resched();
  5445. progress = 0;
  5446. continue;
  5447. }
  5448. progress = 1;
  5449. if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
  5450. key.objectid + key.offset <= cur_byte) {
  5451. path->slots[0]++;
  5452. goto next;
  5453. }
  5454. total_found++;
  5455. cur_byte = key.objectid + key.offset;
  5456. btrfs_release_path(root, path);
  5457. __alloc_chunk_for_shrink(root, block_group, 0);
  5458. ret = relocate_one_extent(root, path, &key, block_group,
  5459. reloc_inode, pass);
  5460. BUG_ON(ret < 0);
  5461. if (ret > 0)
  5462. skipped++;
  5463. key.objectid = cur_byte;
  5464. key.type = 0;
  5465. key.offset = 0;
  5466. }
  5467. btrfs_release_path(root, path);
  5468. if (pass == 0) {
  5469. btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
  5470. invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
  5471. }
  5472. if (total_found > 0) {
  5473. printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
  5474. (unsigned long long)total_found, pass);
  5475. pass++;
  5476. if (total_found == skipped && pass > 2) {
  5477. iput(reloc_inode);
  5478. reloc_inode = create_reloc_inode(info, block_group);
  5479. pass = 0;
  5480. }
  5481. goto again;
  5482. }
  5483. /* delete reloc_inode */
  5484. iput(reloc_inode);
  5485. /* unpin extents in this range */
  5486. trans = btrfs_start_transaction(info->tree_root, 1);
  5487. btrfs_commit_transaction(trans, info->tree_root);
  5488. spin_lock(&block_group->lock);
  5489. WARN_ON(block_group->pinned > 0);
  5490. WARN_ON(block_group->reserved > 0);
  5491. WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
  5492. spin_unlock(&block_group->lock);
  5493. put_block_group(block_group);
  5494. ret = 0;
  5495. out:
  5496. btrfs_free_path(path);
  5497. return ret;
  5498. }
  5499. static int find_first_block_group(struct btrfs_root *root,
  5500. struct btrfs_path *path, struct btrfs_key *key)
  5501. {
  5502. int ret = 0;
  5503. struct btrfs_key found_key;
  5504. struct extent_buffer *leaf;
  5505. int slot;
  5506. ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
  5507. if (ret < 0)
  5508. goto out;
  5509. while (1) {
  5510. slot = path->slots[0];
  5511. leaf = path->nodes[0];
  5512. if (slot >= btrfs_header_nritems(leaf)) {
  5513. ret = btrfs_next_leaf(root, path);
  5514. if (ret == 0)
  5515. continue;
  5516. if (ret < 0)
  5517. goto out;
  5518. break;
  5519. }
  5520. btrfs_item_key_to_cpu(leaf, &found_key, slot);
  5521. if (found_key.objectid >= key->objectid &&
  5522. found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
  5523. ret = 0;
  5524. goto out;
  5525. }
  5526. path->slots[0]++;
  5527. }
  5528. ret = -ENOENT;
  5529. out:
  5530. return ret;
  5531. }
  5532. int btrfs_free_block_groups(struct btrfs_fs_info *info)
  5533. {
  5534. struct btrfs_block_group_cache *block_group;
  5535. struct rb_node *n;
  5536. spin_lock(&info->block_group_cache_lock);
  5537. while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
  5538. block_group = rb_entry(n, struct btrfs_block_group_cache,
  5539. cache_node);
  5540. rb_erase(&block_group->cache_node,
  5541. &info->block_group_cache_tree);
  5542. spin_unlock(&info->block_group_cache_lock);
  5543. btrfs_remove_free_space_cache(block_group);
  5544. down_write(&block_group->space_info->groups_sem);
  5545. list_del(&block_group->list);
  5546. up_write(&block_group->space_info->groups_sem);
  5547. WARN_ON(atomic_read(&block_group->count) != 1);
  5548. kfree(block_group);
  5549. spin_lock(&info->block_group_cache_lock);
  5550. }
  5551. spin_unlock(&info->block_group_cache_lock);
  5552. return 0;
  5553. }
  5554. int btrfs_read_block_groups(struct btrfs_root *root)
  5555. {
  5556. struct btrfs_path *path;
  5557. int ret;
  5558. struct btrfs_block_group_cache *cache;
  5559. struct btrfs_fs_info *info = root->fs_info;
  5560. struct btrfs_space_info *space_info;
  5561. struct btrfs_key key;
  5562. struct btrfs_key found_key;
  5563. struct extent_buffer *leaf;
  5564. root = info->extent_root;
  5565. key.objectid = 0;
  5566. key.offset = 0;
  5567. btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
  5568. path = btrfs_alloc_path();
  5569. if (!path)
  5570. return -ENOMEM;
  5571. while (1) {
  5572. ret = find_first_block_group(root, path, &key);
  5573. if (ret > 0) {
  5574. ret = 0;
  5575. goto error;
  5576. }
  5577. if (ret != 0)
  5578. goto error;
  5579. leaf = path->nodes[0];
  5580. btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
  5581. cache = kzalloc(sizeof(*cache), GFP_NOFS);
  5582. if (!cache) {
  5583. ret = -ENOMEM;
  5584. break;
  5585. }
  5586. atomic_set(&cache->count, 1);
  5587. spin_lock_init(&cache->lock);
  5588. mutex_init(&cache->alloc_mutex);
  5589. mutex_init(&cache->cache_mutex);
  5590. INIT_LIST_HEAD(&cache->list);
  5591. read_extent_buffer(leaf, &cache->item,
  5592. btrfs_item_ptr_offset(leaf, path->slots[0]),
  5593. sizeof(cache->item));
  5594. memcpy(&cache->key, &found_key, sizeof(found_key));
  5595. key.objectid = found_key.objectid + found_key.offset;
  5596. btrfs_release_path(root, path);
  5597. cache->flags = btrfs_block_group_flags(&cache->item);
  5598. ret = update_space_info(info, cache->flags, found_key.offset,
  5599. btrfs_block_group_used(&cache->item),
  5600. &space_info);
  5601. BUG_ON(ret);
  5602. cache->space_info = space_info;
  5603. down_write(&space_info->groups_sem);
  5604. list_add_tail(&cache->list, &space_info->block_groups);
  5605. up_write(&space_info->groups_sem);
  5606. ret = btrfs_add_block_group_cache(root->fs_info, cache);
  5607. BUG_ON(ret);
  5608. set_avail_alloc_bits(root->fs_info, cache->flags);
  5609. if (btrfs_chunk_readonly(root, cache->key.objectid))
  5610. set_block_group_readonly(cache);
  5611. }
  5612. ret = 0;
  5613. error:
  5614. btrfs_free_path(path);
  5615. return ret;
  5616. }
  5617. int btrfs_make_block_group(struct btrfs_trans_handle *trans,
  5618. struct btrfs_root *root, u64 bytes_used,
  5619. u64 type, u64 chunk_objectid, u64 chunk_offset,
  5620. u64 size)
  5621. {
  5622. int ret;
  5623. struct btrfs_root *extent_root;
  5624. struct btrfs_block_group_cache *cache;
  5625. extent_root = root->fs_info->extent_root;
  5626. root->fs_info->last_trans_new_blockgroup = trans->transid;
  5627. cache = kzalloc(sizeof(*cache), GFP_NOFS);
  5628. if (!cache)
  5629. return -ENOMEM;
  5630. cache->key.objectid = chunk_offset;
  5631. cache->key.offset = size;
  5632. cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
  5633. atomic_set(&cache->count, 1);
  5634. spin_lock_init(&cache->lock);
  5635. mutex_init(&cache->alloc_mutex);
  5636. mutex_init(&cache->cache_mutex);
  5637. INIT_LIST_HEAD(&cache->list);
  5638. btrfs_set_block_group_used(&cache->item, bytes_used);
  5639. btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
  5640. cache->flags = type;
  5641. btrfs_set_block_group_flags(&cache->item, type);
  5642. ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
  5643. &cache->space_info);
  5644. BUG_ON(ret);
  5645. down_write(&cache->space_info->groups_sem);
  5646. list_add_tail(&cache->list, &cache->space_info->block_groups);
  5647. up_write(&cache->space_info->groups_sem);
  5648. ret = btrfs_add_block_group_cache(root->fs_info, cache);
  5649. BUG_ON(ret);
  5650. ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
  5651. sizeof(cache->item));
  5652. BUG_ON(ret);
  5653. finish_current_insert(trans, extent_root, 0);
  5654. ret = del_pending_extents(trans, extent_root, 0);
  5655. BUG_ON(ret);
  5656. set_avail_alloc_bits(extent_root->fs_info, type);
  5657. return 0;
  5658. }
  5659. int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
  5660. struct btrfs_root *root, u64 group_start)
  5661. {
  5662. struct btrfs_path *path;
  5663. struct btrfs_block_group_cache *block_group;
  5664. struct btrfs_key key;
  5665. int ret;
  5666. root = root->fs_info->extent_root;
  5667. block_group = btrfs_lookup_block_group(root->fs_info, group_start);
  5668. BUG_ON(!block_group);
  5669. BUG_ON(!block_group->ro);
  5670. memcpy(&key, &block_group->key, sizeof(key));
  5671. path = btrfs_alloc_path();
  5672. BUG_ON(!path);
  5673. spin_lock(&root->fs_info->block_group_cache_lock);
  5674. rb_erase(&block_group->cache_node,
  5675. &root->fs_info->block_group_cache_tree);
  5676. spin_unlock(&root->fs_info->block_group_cache_lock);
  5677. btrfs_remove_free_space_cache(block_group);
  5678. down_write(&block_group->space_info->groups_sem);
  5679. list_del(&block_group->list);
  5680. up_write(&block_group->space_info->groups_sem);
  5681. spin_lock(&block_group->space_info->lock);
  5682. block_group->space_info->total_bytes -= block_group->key.offset;
  5683. block_group->space_info->bytes_readonly -= block_group->key.offset;
  5684. spin_unlock(&block_group->space_info->lock);
  5685. block_group->space_info->full = 0;
  5686. put_block_group(block_group);
  5687. put_block_group(block_group);
  5688. ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
  5689. if (ret > 0)
  5690. ret = -EIO;
  5691. if (ret < 0)
  5692. goto out;
  5693. ret = btrfs_del_item(trans, root, path);
  5694. out:
  5695. btrfs_free_path(path);
  5696. return ret;
  5697. }