extent-tree.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 *
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};
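
/*
 * Editor's illustrative sketch (not part of the original file): how a
 * hypothetical caller might pick a force level for do_chunk_alloc() based
 * on the rules described above.  The helper name and its parameters are
 * invented for illustration only.
 */
static inline int example_chunk_alloc_force(int need_chunk, int few_chunks)
{
	if (need_chunk)
		return CHUNK_ALLOC_FORCE;	/* must try to allocate */
	if (few_chunks)
		return CHUNK_ALLOC_LIMITED;	/* build a pool for clustering */
	return CHUNK_ALLOC_NO_FORCE;		/* only if really needed */
}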
/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};
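
/*
 * Editor's illustrative sketch (not in the original): picking the reserve
 * mode described above.  RESERVE_ALLOC is the variant that updates
 * bytes_may_use for ENOSPC accounting; the helper name and parameters are
 * invented for illustration.
 */
static inline int example_reserve_mode(int allocating, int enospc_accounted)
{
	if (!allocating)
		return RESERVE_FREE;		/* releasing a reservation */
	return enospc_accounted ? RESERVE_ALLOC : RESERVE_ALLOC_NO_ACCOUNT;
}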
static int update_block_group(struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags,
			  int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}
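
/*
 * Editor's illustrative sketch (not in the original): the refcount pair
 * above.  Code that stashes a block group pointer takes a reference with
 * btrfs_get_block_group() and must balance it with btrfs_put_block_group();
 * the final put frees the structure.  The function name is invented.
 */
static inline void example_hold_and_release(struct btrfs_block_group_cache *cache)
{
	btrfs_get_block_group(cache);	/* pin while we use it */
	/* ... use cache ... */
	btrfs_put_block_group(cache);	/* may free cache on last put */
}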
/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->key.objectid)
		info->first_logical_byte = block_group->key.objectid;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret); /* -ENOMEM */
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret); /* -ENOMEM */

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret); /* -ENOMEM */
		}

		kfree(logical);
	}
	return 0;
}
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}
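
/*
 * Editor's illustrative sketch (not in the original): pairing the two
 * helpers above.  get_caching_control() returns NULL unless the group is
 * mid-caching, and on success takes a reference the caller must drop with
 * put_caching_control().  The function name is invented.
 */
static inline int example_caching_in_progress(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl = get_caching_control(cache);

	if (!ctl)
		return 0;	/* finished, never started, or fast-loading */
	put_caching_control(ctl);
	return 1;
}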
/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * can't be used yet, because their free space will not be released until
 * the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
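
/*
 * Editor's worked example (not in the original): suppose the caller passes
 * start = 100MiB, end = 110MiB, and a single pinned extent covers
 * [104MiB, 106MiB).  The loop finds that extent, adds [100MiB, 104MiB) as
 * free space, and restarts just past it; the next search fails, so the
 * tail [106MiB, 110MiB) is added after the loop.  total_added is 8MiB, and
 * the 2MiB pinned range is skipped until the transaction commits.
 */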
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    btrfs_next_leaf(extent_root, path)) {
				caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->extent_commit_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	caching_ctl->work.func = caching_thread;

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info.  The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
			}
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching, set cached to the
		 * appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->extent_commit_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

	return ret;
}
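
/*
 * Editor's illustrative sketch (not in the original): a typical caller
 * kicks off caching and, when it needs the complete free-space picture,
 * waits for the worker to flip the state to FINISHED.  This mirrors the
 * wait pattern used elsewhere in this file, but the function itself is
 * invented for illustration.
 */
static inline void example_wait_for_cache(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	cache_block_group(cache, 0);		/* start or fast-load */
	ctl = get_caching_control(cache);
	if (!ctl)
		return;				/* already finished */
	wait_event(ctl->wait, block_group_cache_done(cache));
	put_caching_control(ctl);
}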
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}
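
/*
 * Editor's illustrative sketch (not in the original): both lookup helpers
 * above return with a reference held (taken inside
 * block_group_cache_tree_search), so every successful lookup must be
 * balanced with btrfs_put_block_group().  The function name is invented.
 */
static inline u64 example_block_group_len(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;
	u64 len = 0;

	cache = btrfs_lookup_block_group(info, bytenr);
	if (cache) {
		len = cache->key.offset;	/* key.offset is the group length */
		btrfs_put_block_group(cache);
	}
	return len;
}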
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}
u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}
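
/*
 * Editor's worked example (not in the original): div_factor() from math.h
 * scales by tenths, so with the initial factor of 9 a group is skipped once
 * used + pinned + reserved reaches 90% of its size; the full_search retry
 * raises the factor to 10, i.e. a 100% threshold.  For instance, a 1GiB
 * metadata group qualifies only while less than div_factor(1GiB, 9), about
 * 921.6MiB, is accounted against it.
 */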
/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}
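
/*
 * Editor's illustrative sketch (not in the original): the helper above
 * returns 0 when an EXTENT_ITEM with exactly (start, len) exists, a
 * positive value when it does not, and a negative errno on failure,
 * mirroring btrfs_search_slot().  The wrapper name is invented.
 */
static inline bool example_extent_exists(struct btrfs_root *root,
					 u64 start, u64 len)
{
	return btrfs_lookup_extent(root, start, len) == 0;
}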
  627. /*
  628. * helper function to lookup reference count and flags of extent.
  629. *
  630. * the head node for delayed ref is used to store the sum of all the
  631. * reference count modifications queued up in the rbtree. the head
  632. * node may also store the extent flags to set. This way you can check
  633. * to see what the reference count and extent flags would be if all of
  634. * the delayed refs are not processed.
  635. */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and
			 * try again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
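
/*
 * Sketch of a read-only caller, assuming no transaction context: passing a
 * NULL trans makes the lookup above search the commit root without locking,
 * so the result reflects the last committed state only (no delayed refs are
 * folded in). The helper name is hypothetical.
 */
static inline int committed_refs_of(struct btrfs_root *root, u64 bytenr,
				    u64 num_bytes, u64 *refs)
{
	/* flags are not needed here, so pass NULL for the flags pointer */
	return btrfs_lookup_extent_info(NULL, root, bytenr, num_bytes,
					refs, NULL);
}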
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually the full back refs are generic, and
 * can be used in all cases where the implicit back refs are used. The
 * major shortcoming of the full back refs is their overhead. Every time a
 * tree block gets COWed, we have to update the back refs entry for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in
 * the tree block info structure.
 */
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(trans, root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
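
/*
 * Sketch showing how the hash above becomes the key offset of an implicit
 * data back ref item, mirroring lookup_extent_data_ref() and
 * insert_extent_data_ref() below. The helper itself is illustrative only.
 */
static inline void init_data_ref_key(struct btrfs_key *key, u64 bytenr,
				     u64 root_objectid, u64 owner, u64 offset)
{
	key->objectid = bytenr;			/* first byte of the extent */
	key->type = BTRFS_EXTENT_DATA_REF_KEY;	/* implicit data back ref */
	key->offset = hash_extent_data_ref(root_objectid, owner, offset);
}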
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
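		/*
		 * the key offset is a hash, so different (root, owner,
		 * offset) tuples can collide. On -EEXIST, check whether the
		 * existing item matches ours; if not, bump the offset and
		 * try the next slot until we find our ref or an empty slot.
		 */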
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}
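
/*
 * Mapping used by extent_ref_type() below: extents whose owner objectid is
 * below BTRFS_FIRST_FREE_OBJECTID are tree blocks, and a non-zero parent
 * selects the shared (full back ref) flavour:
 *
 *                    parent == 0                  parent > 0
 *   tree block:      BTRFS_TREE_BLOCK_REF_KEY     BTRFS_SHARED_BLOCK_REF_KEY
 *   data extent:     BTRFS_EXTENT_DATA_REF_KEY    BTRFS_SHARED_DATA_REF_KEY
 */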
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * look for an inline back ref. if the back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * if the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	}
	BUG_ON(ret); /* Corruption */

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block.
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
/*
 * helper to add a new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(trans, root, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
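	/*
	 * btrfs_extend_item() grew the item at its end; if the new ref does
	 * not belong at the end, shift the tail of the item over to open a
	 * gap of 'size' bytes at the insertion offset.
	 */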
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}
/*
 * helper to update/remove an inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
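		/*
		 * the last reference of this type is gone: remove the
		 * inline ref by sliding the remainder of the item down
		 * over it and truncating the item.
		 */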
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(trans, root, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(trans, root, path, iref,
					     refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(trans, root, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}
static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(trans, root, path, iref,
					     -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}
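
/* blkdev_issue_discard() works in 512-byte sectors, hence the shift by 9 */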
static int btrfs_issue_discard(struct block_device *bdev,
			       u64 start, u64 len)
{
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
			      bytenr, &num_bytes, &bbio, 0);
	/* Error condition is -ENOMEM */
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			if (!stripe->dev->can_discard)
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length);
			if (!ret)
				discarded_bytes += stripe->length;
			else if (ret != -EOPNOTSUPP)
				break; /* Logic errors or -ENOMEM, or -EIO,
					  though it's not clear how -EIO could
					  happen here. JDM */

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		kfree(bbio);
	}

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	return ret;
}
/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset, int for_cow)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
	}
	return ret;
}
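
/*
 * Illustrative sketch (wrapper name and values are hypothetical): taking an
 * extra reference on a data extent for a reflink-style operation would queue
 * a delayed ref via btrfs_inc_extent_ref() above; nothing touches the extent
 * tree until the delayed refs are run.
 */
static inline int sketch_ref_data_extent(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 u64 bytenr, u64 num_bytes,
					 u64 ino, u64 file_offset)
{
	return btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
				    0, /* parent: 0 => implicit back ref */
				    root->root_key.objectid,
				    ino, file_offset, 0 /* for_cow */);
}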
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;

	if (trans->aborted)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = node->num_bytes;

	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_tree_ref(node);
	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags ||
		       !extent_op->update_key);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret = 0;

	if (trans->aborted)
		return 0;

	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree. But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
			}
		}
		return ret;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}
static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents the ref count from going down to zero while
	 * there still are pending delayed refs.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}
/*
 * Returns the number of refs processed on success (including 0 when called
 * with an already aborted transaction). Returns a negative error such as
 * -ENOMEM or -EIO on failure and will abort the transaction.
 */
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head. If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * We need to try and merge add/drops of the same ref since we
		 * can run into issues with relocate dropping the implicit ref
		 * and then it being added back again before the drop can
		 * finish. If we merged anything we need to re-loop so we can
		 * get a good ref.
		 */
		btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
					 locked_ref);

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);

		if (ref && ref->seq &&
		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
			/*
			 * there are still refs with lower seq numbers in the
			 * process of being added. Don't run this ref yet.
			 */
			list_del_init(&locked_ref->cluster);
			btrfs_delayed_ref_unlock(locked_ref);
			locked_ref = NULL;
			delayed_refs->num_heads_ready++;
			spin_unlock(&delayed_refs->lock);
			cond_resched();
			spin_lock(&delayed_refs->lock);
			continue;
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		if (!ref) {
			/* All delayed refs have been processed; go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				btrfs_free_delayed_extent_op(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				btrfs_free_delayed_extent_op(extent_op);

				if (ret) {
					printk(KERN_DEBUG
					       "btrfs: run_delayed_extent_op "
					       "returned %d\n", ret);
					spin_lock(&delayed_refs->lock);
					btrfs_delayed_ref_unlock(locked_ref);
					return ret;
				}

				goto next;
			}
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;
		if (!btrfs_delayed_ref_is_head(ref)) {
			/*
			 * when we play the delayed ref, also correct the
			 * ref_mod on the head
			 */
			switch (ref->action) {
			case BTRFS_ADD_DELAYED_REF:
			case BTRFS_ADD_DELAYED_EXTENT:
				locked_ref->node.ref_mod -= ref->ref_mod;
				break;
			case BTRFS_DROP_DELAYED_REF:
				locked_ref->node.ref_mod += ref->ref_mod;
				break;
			default:
				WARN_ON(1);
			}
		}
		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);

		btrfs_free_delayed_extent_op(extent_op);
		if (ret) {
			btrfs_delayed_ref_unlock(locked_ref);
			btrfs_put_delayed_ref(ref);
			printk(KERN_DEBUG
			       "btrfs: run_one_delayed_ref returned %d\n", ret);
			spin_lock(&delayed_refs->lock);
			return ret;
		}

		/*
		 * If this node is a head, that means all the refs in this head
		 * have been dealt with, and we will pick the next head to deal
		 * with, so we must unlock the head and drop it from the cluster
		 * list before we release it.
		 */
		if (btrfs_delayed_ref_is_head(ref)) {
			list_del_init(&locked_ref->cluster);
			btrfs_delayed_ref_unlock(locked_ref);
			locked_ref = NULL;
		}
		btrfs_put_delayed_ref(ref);
		count++;
next:
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	return count;
}
#ifdef SCRAMBLE_DELAYED_REFS
/*
 * Normally delayed refs get processed in ascending bytenr order. This
 * correlates in most cases to the order added. To expose dependencies on this
 * order, we start to process the tree in the middle instead of the beginning.
 */
static u64 find_middle(struct rb_root *root)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int alt = 1;
	u64 middle;
	u64 first = 0, last = 0;

	n = rb_first(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		first = entry->bytenr;
	}
	n = rb_last(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		last = entry->bytenr;
	}
	n = root->rb_node;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);

		middle = entry->bytenr;

		if (alt)
			n = n->rb_left;
		else
			n = n->rb_right;

		alt = 1 - alt;
	}
	return middle;
}
#endif
int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct qgroup_update *qgroup_update;
	int ret = 0;

	if (list_empty(&trans->qgroup_ref_list) !=
	    !trans->delayed_ref_elem.seq) {
		/* list without seq or seq without list */
		printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
			list_empty(&trans->qgroup_ref_list) ? "" : " not",
			trans->delayed_ref_elem.seq);
		BUG();
	}

	if (!trans->delayed_ref_elem.seq)
		return 0;

	while (!list_empty(&trans->qgroup_ref_list)) {
		qgroup_update = list_first_entry(&trans->qgroup_ref_list,
						 struct qgroup_update, list);
		list_del(&qgroup_update->list);
		if (!ret)
			ret = btrfs_qgroup_account_ref(
					trans, fs_info, qgroup_update->node,
					qgroup_update->extent_op);
		kfree(qgroup_update);
	}

	btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);

	return ret;
}
/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far. count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 *
 * Returns 0 on success or if called with an aborted transaction
 * Returns <0 on error and aborts the transaction
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int ret;
	u64 delayed_start;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;
	int loops;

	/* We'll clean this up in btrfs_cleanup_transaction */
	if (trans->aborted)
		return 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);
again:
	loops = 0;
	spin_lock(&delayed_refs->lock);

#ifdef SCRAMBLE_DELAYED_REFS
	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif

	if (count == 0) {
		count = delayed_refs->num_entries * 2;
		run_most = 1;
	}

	while (1) {
		if (!(run_all || run_most) &&
		    delayed_refs->num_heads_ready < 64)
			break;

		/*
		 * go find something we can process in the rbtree. We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		delayed_start = delayed_refs->run_delayed_start;
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		ret = run_clustered_refs(trans, root, &cluster);
		if (ret < 0) {
			btrfs_release_ref_cluster(&cluster);
			spin_unlock(&delayed_refs->lock);
			btrfs_abort_transaction(trans, root, ret);
			return ret;
		}

		count -= min_t(unsigned long, ret, count);

		if (count == 0)
			break;

		if (delayed_start >= delayed_refs->run_delayed_start) {
			if (loops == 0) {
				/*
				 * btrfs_find_ref_cluster looped. let's do one
				 * more cycle. if we don't run any delayed ref
				 * during that cycle (because we can't because
				 * all of them are blocked), bail out.
				 */
				loops = 1;
			} else {
				/*
				 * no runnable refs left, stop trying
				 */
				BUG_ON(run_all);
				break;
			}
		}
		if (ret) {
			/* refs were run, let's reset staleness detection */
			loops = 0;
		}
	}

	if (run_all) {
		if (!list_empty(&trans->new_bgs)) {
			spin_unlock(&delayed_refs->lock);
			btrfs_create_pending_block_groups(trans, root);
			spin_lock(&delayed_refs->lock);
		}

		node = rb_first(&delayed_refs->root);
		if (!node)
			goto out;
		count = (unsigned long)-1;

		while (node) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;

				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);
		goto again;
	}
out:
	spin_unlock(&delayed_refs->lock);
	assert_qgroups_uptodate(trans);
	return 0;
}
  2315. int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
  2316. struct btrfs_root *root,
  2317. u64 bytenr, u64 num_bytes, u64 flags,
  2318. int is_data)
  2319. {
  2320. struct btrfs_delayed_extent_op *extent_op;
  2321. int ret;
  2322. extent_op = btrfs_alloc_delayed_extent_op();
  2323. if (!extent_op)
  2324. return -ENOMEM;
  2325. extent_op->flags_to_set = flags;
  2326. extent_op->update_flags = 1;
  2327. extent_op->update_key = 0;
  2328. extent_op->is_data = is_data ? 1 : 0;
  2329. ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
  2330. num_bytes, extent_op);
  2331. if (ret)
  2332. btrfs_free_delayed_extent_op(extent_op);
  2333. return ret;
  2334. }
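/*
 * Look through the delayed refs for the given data extent and decide
 * whether any root other than ours references it. Returns 0 if the only
 * pending ref matches this root/objectid/offset, 1 if the extent is (or
 * may become) shared, -ENOENT if no delayed refs exist for this bytenr,
 * and -EAGAIN if the head's mutex was contended and the caller should
 * retry.
 */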
  2335. static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
  2336. struct btrfs_root *root,
  2337. struct btrfs_path *path,
  2338. u64 objectid, u64 offset, u64 bytenr)
  2339. {
  2340. struct btrfs_delayed_ref_head *head;
  2341. struct btrfs_delayed_ref_node *ref;
  2342. struct btrfs_delayed_data_ref *data_ref;
  2343. struct btrfs_delayed_ref_root *delayed_refs;
  2344. struct rb_node *node;
  2345. int ret = 0;
  2346. ret = -ENOENT;
  2347. delayed_refs = &trans->transaction->delayed_refs;
  2348. spin_lock(&delayed_refs->lock);
  2349. head = btrfs_find_delayed_ref_head(trans, bytenr);
  2350. if (!head)
  2351. goto out;
  2352. if (!mutex_trylock(&head->mutex)) {
  2353. atomic_inc(&head->node.refs);
  2354. spin_unlock(&delayed_refs->lock);
  2355. btrfs_release_path(path);
  2356. /*
  2357. * Mutex was contended, block until it's released and let
  2358. * caller try again
  2359. */
  2360. mutex_lock(&head->mutex);
  2361. mutex_unlock(&head->mutex);
  2362. btrfs_put_delayed_ref(&head->node);
  2363. return -EAGAIN;
  2364. }
  2365. node = rb_prev(&head->node.rb_node);
  2366. if (!node)
  2367. goto out_unlock;
  2368. ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
  2369. if (ref->bytenr != bytenr)
  2370. goto out_unlock;
  2371. ret = 1;
  2372. if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
  2373. goto out_unlock;
  2374. data_ref = btrfs_delayed_node_to_data_ref(ref);
  2375. node = rb_prev(node);
  2376. if (node) {
  2377. int seq = ref->seq;
  2378. ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
  2379. if (ref->bytenr == bytenr && ref->seq == seq)
  2380. goto out_unlock;
  2381. }
  2382. if (data_ref->root != root->root_key.objectid ||
  2383. data_ref->objectid != objectid || data_ref->offset != offset)
  2384. goto out_unlock;
  2385. ret = 0;
  2386. out_unlock:
  2387. mutex_unlock(&head->mutex);
  2388. out:
  2389. spin_unlock(&delayed_refs->lock);
  2390. return ret;
  2391. }
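/*
 * Check the committed extent tree for cross references. Returns 0 only
 * if the extent at @bytenr carries a single inline EXTENT_DATA_REF
 * matching this root/objectid/offset and was created after the last
 * snapshot, i.e. nobody else can be referencing it; any other return
 * means the extent is (or might be) shared.
 */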
  2392. static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
  2393. struct btrfs_root *root,
  2394. struct btrfs_path *path,
  2395. u64 objectid, u64 offset, u64 bytenr)
  2396. {
  2397. struct btrfs_root *extent_root = root->fs_info->extent_root;
  2398. struct extent_buffer *leaf;
  2399. struct btrfs_extent_data_ref *ref;
  2400. struct btrfs_extent_inline_ref *iref;
  2401. struct btrfs_extent_item *ei;
  2402. struct btrfs_key key;
  2403. u32 item_size;
  2404. int ret;
  2405. key.objectid = bytenr;
  2406. key.offset = (u64)-1;
  2407. key.type = BTRFS_EXTENT_ITEM_KEY;
  2408. ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
  2409. if (ret < 0)
  2410. goto out;
  2411. BUG_ON(ret == 0); /* Corruption */
  2412. ret = -ENOENT;
  2413. if (path->slots[0] == 0)
  2414. goto out;
  2415. path->slots[0]--;
  2416. leaf = path->nodes[0];
  2417. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  2418. if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
  2419. goto out;
  2420. ret = 1;
  2421. item_size = btrfs_item_size_nr(leaf, path->slots[0]);
  2422. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  2423. if (item_size < sizeof(*ei)) {
  2424. WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
  2425. goto out;
  2426. }
  2427. #endif
  2428. ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
  2429. if (item_size != sizeof(*ei) +
  2430. btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
  2431. goto out;
  2432. if (btrfs_extent_generation(leaf, ei) <=
  2433. btrfs_root_last_snapshot(&root->root_item))
  2434. goto out;
  2435. iref = (struct btrfs_extent_inline_ref *)(ei + 1);
  2436. if (btrfs_extent_inline_ref_type(leaf, iref) !=
  2437. BTRFS_EXTENT_DATA_REF_KEY)
  2438. goto out;
  2439. ref = (struct btrfs_extent_data_ref *)(&iref->offset);
  2440. if (btrfs_extent_refs(leaf, ei) !=
  2441. btrfs_extent_data_ref_count(leaf, ref) ||
  2442. btrfs_extent_data_ref_root(leaf, ref) !=
  2443. root->root_key.objectid ||
  2444. btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
  2445. btrfs_extent_data_ref_offset(leaf, ref) != offset)
  2446. goto out;
  2447. ret = 0;
  2448. out:
  2449. return ret;
  2450. }
  2451. int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
  2452. struct btrfs_root *root,
  2453. u64 objectid, u64 offset, u64 bytenr)
  2454. {
  2455. struct btrfs_path *path;
  2456. int ret;
  2457. int ret2;
  2458. path = btrfs_alloc_path();
  2459. if (!path)
  2460. return -ENOENT;
  2461. do {
  2462. ret = check_committed_ref(trans, root, path, objectid,
  2463. offset, bytenr);
  2464. if (ret && ret != -ENOENT)
  2465. goto out;
  2466. ret2 = check_delayed_ref(trans, root, path, objectid,
  2467. offset, bytenr);
  2468. } while (ret2 == -EAGAIN);
  2469. if (ret2 && ret2 != -ENOENT) {
  2470. ret = ret2;
  2471. goto out;
  2472. }
  2473. if (ret != -ENOENT || ret2 != -ENOENT)
  2474. ret = 0;
  2475. out:
  2476. btrfs_free_path(path);
  2477. if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
  2478. WARN_ON(ret > 0);
  2479. return ret;
  2480. }
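/*
 * Add (inc) or drop (!inc) one reference for every extent referenced by
 * @buf: the disk extents of the file extent items if it is a leaf, or
 * the child blocks if it is a node.
 */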
  2481. static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
  2482. struct btrfs_root *root,
  2483. struct extent_buffer *buf,
  2484. int full_backref, int inc, int for_cow)
  2485. {
  2486. u64 bytenr;
  2487. u64 num_bytes;
  2488. u64 parent;
  2489. u64 ref_root;
  2490. u32 nritems;
  2491. struct btrfs_key key;
  2492. struct btrfs_file_extent_item *fi;
  2493. int i;
  2494. int level;
  2495. int ret = 0;
  2496. int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
  2497. u64, u64, u64, u64, u64, u64, int);
  2498. ref_root = btrfs_header_owner(buf);
  2499. nritems = btrfs_header_nritems(buf);
  2500. level = btrfs_header_level(buf);
  2501. if (!root->ref_cows && level == 0)
  2502. return 0;
  2503. if (inc)
  2504. process_func = btrfs_inc_extent_ref;
  2505. else
  2506. process_func = btrfs_free_extent;
  2507. if (full_backref)
  2508. parent = buf->start;
  2509. else
  2510. parent = 0;
  2511. for (i = 0; i < nritems; i++) {
  2512. if (level == 0) {
  2513. btrfs_item_key_to_cpu(buf, &key, i);
  2514. if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
  2515. continue;
  2516. fi = btrfs_item_ptr(buf, i,
  2517. struct btrfs_file_extent_item);
  2518. if (btrfs_file_extent_type(buf, fi) ==
  2519. BTRFS_FILE_EXTENT_INLINE)
  2520. continue;
  2521. bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
  2522. if (bytenr == 0)
  2523. continue;
  2524. num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
  2525. key.offset -= btrfs_file_extent_offset(buf, fi);
  2526. ret = process_func(trans, root, bytenr, num_bytes,
  2527. parent, ref_root, key.objectid,
  2528. key.offset, for_cow);
  2529. if (ret)
  2530. goto fail;
  2531. } else {
  2532. bytenr = btrfs_node_blockptr(buf, i);
  2533. num_bytes = btrfs_level_size(root, level - 1);
  2534. ret = process_func(trans, root, bytenr, num_bytes,
  2535. parent, ref_root, level - 1, 0,
  2536. for_cow);
  2537. if (ret)
  2538. goto fail;
  2539. }
  2540. }
  2541. return 0;
  2542. fail:
  2543. return ret;
  2544. }
  2545. int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
  2546. struct extent_buffer *buf, int full_backref, int for_cow)
  2547. {
  2548. return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
  2549. }
  2550. int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
  2551. struct extent_buffer *buf, int full_backref, int for_cow)
  2552. {
  2553. return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
  2554. }
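/*
 * Copy the in-memory block group item back into its slot in the extent
 * tree; any failure here aborts the transaction.
 */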
  2555. static int write_one_cache_group(struct btrfs_trans_handle *trans,
  2556. struct btrfs_root *root,
  2557. struct btrfs_path *path,
  2558. struct btrfs_block_group_cache *cache)
  2559. {
  2560. int ret;
  2561. struct btrfs_root *extent_root = root->fs_info->extent_root;
  2562. unsigned long bi;
  2563. struct extent_buffer *leaf;
  2564. ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
  2565. if (ret < 0)
  2566. goto fail;
  2567. BUG_ON(ret); /* Corruption */
  2568. leaf = path->nodes[0];
  2569. bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
  2570. write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
  2571. btrfs_mark_buffer_dirty(leaf);
  2572. btrfs_release_path(path);
  2573. fail:
  2574. if (ret) {
  2575. btrfs_abort_transaction(trans, root, ret);
  2576. return ret;
  2577. }
  2578. return 0;
  2579. }
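/*
 * Return the block group that follows @cache in the block group cache
 * rbtree, dropping the reference on @cache and holding one on the
 * result (NULL when @cache was the last group).
 */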
  2580. static struct btrfs_block_group_cache *
  2581. next_block_group(struct btrfs_root *root,
  2582. struct btrfs_block_group_cache *cache)
  2583. {
  2584. struct rb_node *node;
  2585. spin_lock(&root->fs_info->block_group_cache_lock);
  2586. node = rb_next(&cache->cache_node);
  2587. btrfs_put_block_group(cache);
  2588. if (node) {
  2589. cache = rb_entry(node, struct btrfs_block_group_cache,
  2590. cache_node);
  2591. btrfs_get_block_group(cache);
  2592. } else
  2593. cache = NULL;
  2594. spin_unlock(&root->fs_info->block_group_cache_lock);
  2595. return cache;
  2596. }
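/*
 * Get the free space cache inode for this block group ready for the
 * current transaction: create or truncate it as needed and preallocate
 * room for the cache, sized by the block group.
 */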
  2597. static int cache_save_setup(struct btrfs_block_group_cache *block_group,
  2598. struct btrfs_trans_handle *trans,
  2599. struct btrfs_path *path)
  2600. {
  2601. struct btrfs_root *root = block_group->fs_info->tree_root;
  2602. struct inode *inode = NULL;
  2603. u64 alloc_hint = 0;
  2604. int dcs = BTRFS_DC_ERROR;
  2605. int num_pages = 0;
  2606. int retries = 0;
  2607. int ret = 0;
  2608. /*
2609. * If this block group is smaller than 100 megs, don't bother caching the
  2610. * block group.
  2611. */
  2612. if (block_group->key.offset < (100 * 1024 * 1024)) {
  2613. spin_lock(&block_group->lock);
  2614. block_group->disk_cache_state = BTRFS_DC_WRITTEN;
  2615. spin_unlock(&block_group->lock);
  2616. return 0;
  2617. }
  2618. again:
  2619. inode = lookup_free_space_inode(root, block_group, path);
  2620. if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
  2621. ret = PTR_ERR(inode);
  2622. btrfs_release_path(path);
  2623. goto out;
  2624. }
  2625. if (IS_ERR(inode)) {
  2626. BUG_ON(retries);
  2627. retries++;
  2628. if (block_group->ro)
  2629. goto out_free;
  2630. ret = create_free_space_inode(root, trans, block_group, path);
  2631. if (ret)
  2632. goto out_free;
  2633. goto again;
  2634. }
2635. /* We've already set up this transaction, go ahead and exit */
  2636. if (block_group->cache_generation == trans->transid &&
  2637. i_size_read(inode)) {
  2638. dcs = BTRFS_DC_SETUP;
  2639. goto out_put;
  2640. }
  2641. /*
2642. * We want to set the generation to 0; that way, if anything goes wrong
2643. * from here on out, we know not to trust this cache when we load up
2644. * next time.
  2645. */
  2646. BTRFS_I(inode)->generation = 0;
  2647. ret = btrfs_update_inode(trans, root, inode);
  2648. WARN_ON(ret);
  2649. if (i_size_read(inode) > 0) {
  2650. ret = btrfs_truncate_free_space_cache(root, trans, path,
  2651. inode);
  2652. if (ret)
  2653. goto out_put;
  2654. }
  2655. spin_lock(&block_group->lock);
  2656. if (block_group->cached != BTRFS_CACHE_FINISHED ||
  2657. !btrfs_test_opt(root, SPACE_CACHE)) {
  2658. /*
  2659. * don't bother trying to write stuff out _if_
2660. * a) we're not cached, or
2661. * b) we're mounted with the nospace_cache option.
  2662. */
  2663. dcs = BTRFS_DC_WRITTEN;
  2664. spin_unlock(&block_group->lock);
  2665. goto out_put;
  2666. }
  2667. spin_unlock(&block_group->lock);
  2668. /*
  2669. * Try to preallocate enough space based on how big the block group is.
  2670. * Keep in mind this has to include any pinned space which could end up
  2671. * taking up quite a bit since it's not folded into the other space
  2672. * cache.
  2673. */
  2674. num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
  2675. if (!num_pages)
  2676. num_pages = 1;
  2677. num_pages *= 16;
  2678. num_pages *= PAGE_CACHE_SIZE;
  2679. ret = btrfs_check_data_free_space(inode, num_pages);
  2680. if (ret)
  2681. goto out_put;
  2682. ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
  2683. num_pages, num_pages,
  2684. &alloc_hint);
  2685. if (!ret)
  2686. dcs = BTRFS_DC_SETUP;
  2687. btrfs_free_reserved_data_space(inode, num_pages);
  2688. out_put:
  2689. iput(inode);
  2690. out_free:
  2691. btrfs_release_path(path);
  2692. out:
  2693. spin_lock(&block_group->lock);
  2694. if (!ret && dcs == BTRFS_DC_SETUP)
  2695. block_group->cache_generation = trans->transid;
  2696. block_group->disk_cache_state = dcs;
  2697. spin_unlock(&block_group->lock);
  2698. return ret;
  2699. }
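/*
 * Write out the dirty block groups in three passes: set up free space
 * cache inodes for groups still marked BTRFS_DC_CLEAR, write the dirty
 * block group items themselves (promoting DC_SETUP to DC_NEED_WRITE),
 * and finally write out the free space caches for the NEED_WRITE
 * groups.
 */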
  2700. int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
  2701. struct btrfs_root *root)
  2702. {
  2703. struct btrfs_block_group_cache *cache;
  2704. int err = 0;
  2705. struct btrfs_path *path;
  2706. u64 last = 0;
  2707. path = btrfs_alloc_path();
  2708. if (!path)
  2709. return -ENOMEM;
  2710. again:
  2711. while (1) {
  2712. cache = btrfs_lookup_first_block_group(root->fs_info, last);
  2713. while (cache) {
  2714. if (cache->disk_cache_state == BTRFS_DC_CLEAR)
  2715. break;
  2716. cache = next_block_group(root, cache);
  2717. }
  2718. if (!cache) {
  2719. if (last == 0)
  2720. break;
  2721. last = 0;
  2722. continue;
  2723. }
  2724. err = cache_save_setup(cache, trans, path);
  2725. last = cache->key.objectid + cache->key.offset;
  2726. btrfs_put_block_group(cache);
  2727. }
  2728. while (1) {
  2729. if (last == 0) {
  2730. err = btrfs_run_delayed_refs(trans, root,
  2731. (unsigned long)-1);
  2732. if (err) /* File system offline */
  2733. goto out;
  2734. }
  2735. cache = btrfs_lookup_first_block_group(root->fs_info, last);
  2736. while (cache) {
  2737. if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
  2738. btrfs_put_block_group(cache);
  2739. goto again;
  2740. }
  2741. if (cache->dirty)
  2742. break;
  2743. cache = next_block_group(root, cache);
  2744. }
  2745. if (!cache) {
  2746. if (last == 0)
  2747. break;
  2748. last = 0;
  2749. continue;
  2750. }
  2751. if (cache->disk_cache_state == BTRFS_DC_SETUP)
  2752. cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
  2753. cache->dirty = 0;
  2754. last = cache->key.objectid + cache->key.offset;
  2755. err = write_one_cache_group(trans, root, path, cache);
  2756. if (err) /* File system offline */
  2757. goto out;
  2758. btrfs_put_block_group(cache);
  2759. }
  2760. while (1) {
  2761. /*
  2762. * I don't think this is needed since we're just marking our
2763. * preallocated extent as written, but it can't hurt just in
2764. * case.
  2765. */
  2766. if (last == 0) {
  2767. err = btrfs_run_delayed_refs(trans, root,
  2768. (unsigned long)-1);
  2769. if (err) /* File system offline */
  2770. goto out;
  2771. }
  2772. cache = btrfs_lookup_first_block_group(root->fs_info, last);
  2773. while (cache) {
  2774. /*
  2775. * Really this shouldn't happen, but it could if we
  2776. * couldn't write the entire preallocated extent and
  2777. * splitting the extent resulted in a new block.
  2778. */
  2779. if (cache->dirty) {
  2780. btrfs_put_block_group(cache);
  2781. goto again;
  2782. }
  2783. if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
  2784. break;
  2785. cache = next_block_group(root, cache);
  2786. }
  2787. if (!cache) {
  2788. if (last == 0)
  2789. break;
  2790. last = 0;
  2791. continue;
  2792. }
  2793. err = btrfs_write_out_cache(root, trans, cache, path);
  2794. /*
  2795. * If we didn't have an error then the cache state is still
  2796. * NEED_WRITE, so we can set it to WRITTEN.
  2797. */
  2798. if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
  2799. cache->disk_cache_state = BTRFS_DC_WRITTEN;
  2800. last = cache->key.objectid + cache->key.offset;
  2801. btrfs_put_block_group(cache);
  2802. }
  2803. out:
  2804. btrfs_free_path(path);
  2805. return err;
  2806. }
  2807. int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
  2808. {
  2809. struct btrfs_block_group_cache *block_group;
  2810. int readonly = 0;
  2811. block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
  2812. if (!block_group || block_group->ro)
  2813. readonly = 1;
  2814. if (block_group)
  2815. btrfs_put_block_group(block_group);
  2816. return readonly;
  2817. }
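/*
 * Fold @total_bytes/@bytes_used into the space_info matching @flags,
 * creating and initializing the space_info if this is the first block
 * group of its type. Mirrored profiles (DUP/RAID1/RAID10) consume twice
 * the raw disk space, hence the factor on the disk_* counters.
 */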
  2818. static int update_space_info(struct btrfs_fs_info *info, u64 flags,
  2819. u64 total_bytes, u64 bytes_used,
  2820. struct btrfs_space_info **space_info)
  2821. {
  2822. struct btrfs_space_info *found;
  2823. int i;
  2824. int factor;
  2825. if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
  2826. BTRFS_BLOCK_GROUP_RAID10))
  2827. factor = 2;
  2828. else
  2829. factor = 1;
  2830. found = __find_space_info(info, flags);
  2831. if (found) {
  2832. spin_lock(&found->lock);
  2833. found->total_bytes += total_bytes;
  2834. found->disk_total += total_bytes * factor;
  2835. found->bytes_used += bytes_used;
  2836. found->disk_used += bytes_used * factor;
  2837. found->full = 0;
  2838. spin_unlock(&found->lock);
  2839. *space_info = found;
  2840. return 0;
  2841. }
  2842. found = kzalloc(sizeof(*found), GFP_NOFS);
  2843. if (!found)
  2844. return -ENOMEM;
  2845. for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
  2846. INIT_LIST_HEAD(&found->block_groups[i]);
  2847. init_rwsem(&found->groups_sem);
  2848. spin_lock_init(&found->lock);
  2849. found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
  2850. found->total_bytes = total_bytes;
  2851. found->disk_total = total_bytes * factor;
  2852. found->bytes_used = bytes_used;
  2853. found->disk_used = bytes_used * factor;
  2854. found->bytes_pinned = 0;
  2855. found->bytes_reserved = 0;
  2856. found->bytes_readonly = 0;
  2857. found->bytes_may_use = 0;
  2858. found->full = 0;
  2859. found->force_alloc = CHUNK_ALLOC_NO_FORCE;
  2860. found->chunk_alloc = 0;
  2861. found->flush = 0;
  2862. init_waitqueue_head(&found->wait);
  2863. *space_info = found;
  2864. list_add_rcu(&found->list, &info->space_info);
  2865. if (flags & BTRFS_BLOCK_GROUP_DATA)
  2866. info->data_sinfo = found;
  2867. return 0;
  2868. }
  2869. static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
  2870. {
  2871. u64 extra_flags = chunk_to_extended(flags) &
  2872. BTRFS_EXTENDED_PROFILE_MASK;
  2873. if (flags & BTRFS_BLOCK_GROUP_DATA)
  2874. fs_info->avail_data_alloc_bits |= extra_flags;
  2875. if (flags & BTRFS_BLOCK_GROUP_METADATA)
  2876. fs_info->avail_metadata_alloc_bits |= extra_flags;
  2877. if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
  2878. fs_info->avail_system_alloc_bits |= extra_flags;
  2879. }
  2880. /*
  2881. * returns target flags in extended format or 0 if restripe for this
  2882. * chunk_type is not in progress
  2883. *
  2884. * should be called with either volume_mutex or balance_lock held
  2885. */
  2886. static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
  2887. {
  2888. struct btrfs_balance_control *bctl = fs_info->balance_ctl;
  2889. u64 target = 0;
  2890. if (!bctl)
  2891. return 0;
  2892. if (flags & BTRFS_BLOCK_GROUP_DATA &&
  2893. bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
  2894. target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
  2895. } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
  2896. bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
  2897. target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
  2898. } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
  2899. bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
  2900. target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
  2901. }
  2902. return target;
  2903. }
  2904. /*
  2905. * @flags: available profiles in extended format (see ctree.h)
  2906. *
  2907. * Returns reduced profile in chunk format. If profile changing is in
  2908. * progress (either running or paused) picks the target profile (if it's
  2909. * already available), otherwise falls back to plain reducing.
  2910. */
  2911. u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
  2912. {
  2913. /*
  2914. * we add in the count of missing devices because we want
  2915. * to make sure that any RAID levels on a degraded FS
  2916. * continue to be honored.
  2917. */
  2918. u64 num_devices = root->fs_info->fs_devices->rw_devices +
  2919. root->fs_info->fs_devices->missing_devices;
  2920. u64 target;
  2921. /*
  2922. * see if restripe for this chunk_type is in progress, if so
  2923. * try to reduce to the target profile
  2924. */
  2925. spin_lock(&root->fs_info->balance_lock);
  2926. target = get_restripe_target(root->fs_info, flags);
  2927. if (target) {
  2928. /* pick target profile only if it's already available */
  2929. if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
  2930. spin_unlock(&root->fs_info->balance_lock);
  2931. return extended_to_chunk(target);
  2932. }
  2933. }
  2934. spin_unlock(&root->fs_info->balance_lock);
  2935. if (num_devices == 1)
  2936. flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
  2937. if (num_devices < 4)
  2938. flags &= ~BTRFS_BLOCK_GROUP_RAID10;
  2939. if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
  2940. (flags & (BTRFS_BLOCK_GROUP_RAID1 |
  2941. BTRFS_BLOCK_GROUP_RAID10))) {
  2942. flags &= ~BTRFS_BLOCK_GROUP_DUP;
  2943. }
  2944. if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
  2945. (flags & BTRFS_BLOCK_GROUP_RAID10)) {
  2946. flags &= ~BTRFS_BLOCK_GROUP_RAID1;
  2947. }
  2948. if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
  2949. ((flags & BTRFS_BLOCK_GROUP_RAID1) |
  2950. (flags & BTRFS_BLOCK_GROUP_RAID10) |
  2951. (flags & BTRFS_BLOCK_GROUP_DUP))) {
  2952. flags &= ~BTRFS_BLOCK_GROUP_RAID0;
  2953. }
  2954. return extended_to_chunk(flags);
  2955. }
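/*
 * Fold the currently available allocation bits for this class of block
 * group into @flags and reduce the result to a usable chunk profile.
 */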
  2956. static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
  2957. {
  2958. if (flags & BTRFS_BLOCK_GROUP_DATA)
  2959. flags |= root->fs_info->avail_data_alloc_bits;
  2960. else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
  2961. flags |= root->fs_info->avail_system_alloc_bits;
  2962. else if (flags & BTRFS_BLOCK_GROUP_METADATA)
  2963. flags |= root->fs_info->avail_metadata_alloc_bits;
  2964. return btrfs_reduce_alloc_profile(root, flags);
  2965. }
  2966. u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
  2967. {
  2968. u64 flags;
  2969. if (data)
  2970. flags = BTRFS_BLOCK_GROUP_DATA;
  2971. else if (root == root->fs_info->chunk_root)
  2972. flags = BTRFS_BLOCK_GROUP_SYSTEM;
  2973. else
  2974. flags = BTRFS_BLOCK_GROUP_METADATA;
  2975. return get_alloc_profile(root, flags);
  2976. }
  2977. /*
  2978. * This will check the space that the inode allocates from to make sure we have
  2979. * enough space for bytes.
  2980. */
  2981. int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
  2982. {
  2983. struct btrfs_space_info *data_sinfo;
  2984. struct btrfs_root *root = BTRFS_I(inode)->root;
  2985. struct btrfs_fs_info *fs_info = root->fs_info;
  2986. u64 used;
  2987. int ret = 0, committed = 0, alloc_chunk = 1;
  2988. /* make sure bytes are sectorsize aligned */
  2989. bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
  2990. if (root == root->fs_info->tree_root ||
  2991. BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
  2992. alloc_chunk = 0;
  2993. committed = 1;
  2994. }
  2995. data_sinfo = fs_info->data_sinfo;
  2996. if (!data_sinfo)
  2997. goto alloc;
  2998. again:
  2999. /* make sure we have enough space to handle the data first */
  3000. spin_lock(&data_sinfo->lock);
  3001. used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
  3002. data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
  3003. data_sinfo->bytes_may_use;
  3004. if (used + bytes > data_sinfo->total_bytes) {
  3005. struct btrfs_trans_handle *trans;
  3006. /*
  3007. * if we don't have enough free bytes in this space then we need
  3008. * to alloc a new chunk.
  3009. */
  3010. if (!data_sinfo->full && alloc_chunk) {
  3011. u64 alloc_target;
  3012. data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
  3013. spin_unlock(&data_sinfo->lock);
  3014. alloc:
  3015. alloc_target = btrfs_get_alloc_profile(root, 1);
  3016. trans = btrfs_join_transaction(root);
  3017. if (IS_ERR(trans))
  3018. return PTR_ERR(trans);
  3019. ret = do_chunk_alloc(trans, root->fs_info->extent_root,
  3020. alloc_target,
  3021. CHUNK_ALLOC_NO_FORCE);
  3022. btrfs_end_transaction(trans, root);
  3023. if (ret < 0) {
  3024. if (ret != -ENOSPC)
  3025. return ret;
  3026. else
  3027. goto commit_trans;
  3028. }
  3029. if (!data_sinfo)
  3030. data_sinfo = fs_info->data_sinfo;
  3031. goto again;
  3032. }
  3033. /*
3034. * If we have fewer pinned bytes than we want to allocate, then
3035. * don't bother committing the transaction; it won't help us.
  3036. */
  3037. if (data_sinfo->bytes_pinned < bytes)
  3038. committed = 1;
  3039. spin_unlock(&data_sinfo->lock);
  3040. /* commit the current transaction and try again */
  3041. commit_trans:
  3042. if (!committed &&
  3043. !atomic_read(&root->fs_info->open_ioctl_trans)) {
  3044. committed = 1;
  3045. trans = btrfs_join_transaction(root);
  3046. if (IS_ERR(trans))
  3047. return PTR_ERR(trans);
  3048. ret = btrfs_commit_transaction(trans, root);
  3049. if (ret)
  3050. return ret;
  3051. goto again;
  3052. }
  3053. return -ENOSPC;
  3054. }
  3055. data_sinfo->bytes_may_use += bytes;
  3056. trace_btrfs_space_reservation(root->fs_info, "space_info",
  3057. data_sinfo->flags, bytes, 1);
  3058. spin_unlock(&data_sinfo->lock);
  3059. return 0;
  3060. }
  3061. /*
  3062. * Called if we need to clear a data reservation for this inode.
  3063. */
  3064. void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
  3065. {
  3066. struct btrfs_root *root = BTRFS_I(inode)->root;
  3067. struct btrfs_space_info *data_sinfo;
  3068. /* make sure bytes are sectorsize aligned */
  3069. bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
  3070. data_sinfo = root->fs_info->data_sinfo;
  3071. spin_lock(&data_sinfo->lock);
  3072. data_sinfo->bytes_may_use -= bytes;
  3073. trace_btrfs_space_reservation(root->fs_info, "space_info",
  3074. data_sinfo->flags, bytes, 0);
  3075. spin_unlock(&data_sinfo->lock);
  3076. }
  3077. static void force_metadata_allocation(struct btrfs_fs_info *info)
  3078. {
  3079. struct list_head *head = &info->space_info;
  3080. struct btrfs_space_info *found;
  3081. rcu_read_lock();
  3082. list_for_each_entry_rcu(found, head, list) {
  3083. if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
  3084. found->force_alloc = CHUNK_ALLOC_FORCE;
  3085. }
  3086. rcu_read_unlock();
  3087. }
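/*
 * Decide whether allocating a new chunk is worthwhile: always for
 * CHUNK_ALLOC_FORCE, for CHUNK_ALLOC_LIMITED once free space in this
 * space_info drops below ~1% of the FS size (at least 64M), and
 * otherwise only once roughly 80% of the existing space is allocated.
 */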
  3088. static int should_alloc_chunk(struct btrfs_root *root,
  3089. struct btrfs_space_info *sinfo, int force)
  3090. {
  3091. struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
  3092. u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
  3093. u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
  3094. u64 thresh;
  3095. if (force == CHUNK_ALLOC_FORCE)
  3096. return 1;
  3097. /*
  3098. * We need to take into account the global rsv because for all intents
  3099. * and purposes it's used space. Don't worry about locking the
  3100. * global_rsv, it doesn't change except when the transaction commits.
  3101. */
  3102. if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
  3103. num_allocated += global_rsv->size;
  3104. /*
  3105. * in limited mode, we want to have some free space up to
  3106. * about 1% of the FS size.
  3107. */
  3108. if (force == CHUNK_ALLOC_LIMITED) {
  3109. thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
  3110. thresh = max_t(u64, 64 * 1024 * 1024,
  3111. div_factor_fine(thresh, 1));
  3112. if (num_bytes - num_allocated < thresh)
  3113. return 1;
  3114. }
  3115. if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
  3116. return 0;
  3117. return 1;
  3118. }
  3119. static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
  3120. {
  3121. u64 num_dev;
  3122. if (type & BTRFS_BLOCK_GROUP_RAID10 ||
  3123. type & BTRFS_BLOCK_GROUP_RAID0)
  3124. num_dev = root->fs_info->fs_devices->rw_devices;
  3125. else if (type & BTRFS_BLOCK_GROUP_RAID1)
  3126. num_dev = 2;
  3127. else
  3128. num_dev = 1; /* DUP or single */
3129. /* metadata for updating devices and chunk tree */
  3130. return btrfs_calc_trans_metadata_size(root, num_dev + 1);
  3131. }
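/*
 * Make sure the SYSTEM space_info has room for the chunk tree updates
 * that allocating a chunk of @type will require; if it is running low,
 * allocate another system chunk first.
 */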
  3132. static void check_system_chunk(struct btrfs_trans_handle *trans,
  3133. struct btrfs_root *root, u64 type)
  3134. {
  3135. struct btrfs_space_info *info;
  3136. u64 left;
  3137. u64 thresh;
  3138. info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
  3139. spin_lock(&info->lock);
  3140. left = info->total_bytes - info->bytes_used - info->bytes_pinned -
  3141. info->bytes_reserved - info->bytes_readonly;
  3142. spin_unlock(&info->lock);
  3143. thresh = get_system_chunk_thresh(root, type);
  3144. if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
  3145. printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
  3146. left, thresh, type);
  3147. dump_space_info(info, 0, 0);
  3148. }
  3149. if (left < thresh) {
  3150. u64 flags;
  3151. flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
  3152. btrfs_alloc_chunk(trans, root, flags);
  3153. }
  3154. }
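/*
 * Allocate a new chunk of type @flags if should_alloc_chunk() agrees.
 * Only one allocation runs at a time per space_info; racing callers
 * wait on chunk_mutex and then re-evaluate. Returns 1 if a chunk was
 * allocated, 0 if none was needed, and a negative error otherwise
 * (-ENOSPC also marks the space_info full).
 */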
  3155. static int do_chunk_alloc(struct btrfs_trans_handle *trans,
  3156. struct btrfs_root *extent_root, u64 flags, int force)
  3157. {
  3158. struct btrfs_space_info *space_info;
  3159. struct btrfs_fs_info *fs_info = extent_root->fs_info;
  3160. int wait_for_alloc = 0;
  3161. int ret = 0;
  3162. /* Don't re-enter if we're already allocating a chunk */
  3163. if (trans->allocating_chunk)
  3164. return -ENOSPC;
  3165. space_info = __find_space_info(extent_root->fs_info, flags);
  3166. if (!space_info) {
  3167. ret = update_space_info(extent_root->fs_info, flags,
  3168. 0, 0, &space_info);
  3169. BUG_ON(ret); /* -ENOMEM */
  3170. }
  3171. BUG_ON(!space_info); /* Logic error */
  3172. again:
  3173. spin_lock(&space_info->lock);
  3174. if (force < space_info->force_alloc)
  3175. force = space_info->force_alloc;
  3176. if (space_info->full) {
  3177. spin_unlock(&space_info->lock);
  3178. return 0;
  3179. }
  3180. if (!should_alloc_chunk(extent_root, space_info, force)) {
  3181. spin_unlock(&space_info->lock);
  3182. return 0;
  3183. } else if (space_info->chunk_alloc) {
  3184. wait_for_alloc = 1;
  3185. } else {
  3186. space_info->chunk_alloc = 1;
  3187. }
  3188. spin_unlock(&space_info->lock);
  3189. mutex_lock(&fs_info->chunk_mutex);
  3190. /*
  3191. * The chunk_mutex is held throughout the entirety of a chunk
  3192. * allocation, so once we've acquired the chunk_mutex we know that the
  3193. * other guy is done and we need to recheck and see if we should
  3194. * allocate.
  3195. */
  3196. if (wait_for_alloc) {
  3197. mutex_unlock(&fs_info->chunk_mutex);
  3198. wait_for_alloc = 0;
  3199. goto again;
  3200. }
  3201. trans->allocating_chunk = true;
  3202. /*
  3203. * If we have mixed data/metadata chunks we want to make sure we keep
  3204. * allocating mixed chunks instead of individual chunks.
  3205. */
  3206. if (btrfs_mixed_space_info(space_info))
  3207. flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
  3208. /*
  3209. * if we're doing a data chunk, go ahead and make sure that
  3210. * we keep a reasonable number of metadata chunks allocated in the
  3211. * FS as well.
  3212. */
  3213. if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
  3214. fs_info->data_chunk_allocations++;
  3215. if (!(fs_info->data_chunk_allocations %
  3216. fs_info->metadata_ratio))
  3217. force_metadata_allocation(fs_info);
  3218. }
  3219. /*
  3220. * Check if we have enough space in SYSTEM chunk because we may need
  3221. * to update devices.
  3222. */
  3223. check_system_chunk(trans, extent_root, flags);
  3224. ret = btrfs_alloc_chunk(trans, extent_root, flags);
  3225. trans->allocating_chunk = false;
  3226. if (ret < 0 && ret != -ENOSPC)
  3227. goto out;
  3228. spin_lock(&space_info->lock);
  3229. if (ret)
  3230. space_info->full = 1;
  3231. else
  3232. ret = 1;
  3233. space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
  3234. space_info->chunk_alloc = 0;
  3235. spin_unlock(&space_info->lock);
  3236. out:
  3237. mutex_unlock(&fs_info->chunk_mutex);
  3238. return ret;
  3239. }
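/*
 * Decide whether a reservation of @bytes may overcommit the space_info:
 * current usage plus the request must stay below total_bytes plus a
 * slice of the unallocated device space (1/8 when we can flush all
 * things, 1/2 otherwise, halved again for mirrored profiles).
 */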
  3240. static int can_overcommit(struct btrfs_root *root,
  3241. struct btrfs_space_info *space_info, u64 bytes,
  3242. enum btrfs_reserve_flush_enum flush)
  3243. {
  3244. u64 profile = btrfs_get_alloc_profile(root, 0);
  3245. u64 avail;
  3246. u64 used;
  3247. used = space_info->bytes_used + space_info->bytes_reserved +
  3248. space_info->bytes_pinned + space_info->bytes_readonly +
  3249. space_info->bytes_may_use;
  3250. spin_lock(&root->fs_info->free_chunk_lock);
  3251. avail = root->fs_info->free_chunk_space;
  3252. spin_unlock(&root->fs_info->free_chunk_lock);
  3253. /*
  3254. * If we have dup, raid1 or raid10 then only half of the free
3255. * space is actually usable.
  3256. */
  3257. if (profile & (BTRFS_BLOCK_GROUP_DUP |
  3258. BTRFS_BLOCK_GROUP_RAID1 |
  3259. BTRFS_BLOCK_GROUP_RAID10))
  3260. avail >>= 1;
  3261. /*
  3262. * If we aren't flushing all things, let us overcommit up to
3263. * half of the space. If we can flush, don't let us overcommit
3264. * too much: only up to 1/8th of the space.
  3265. */
  3266. if (flush == BTRFS_RESERVE_FLUSH_ALL)
  3267. avail >>= 3;
  3268. else
  3269. avail >>= 1;
  3270. if (used + bytes < space_info->total_bytes + avail)
  3271. return 1;
  3272. return 0;
  3273. }
  3274. static inline int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
  3275. unsigned long nr_pages,
  3276. enum wb_reason reason)
  3277. {
  3278. /* the flusher is dealing with the dirty inodes now. */
  3279. if (writeback_in_progress(sb->s_bdi))
  3280. return 1;
  3281. if (down_read_trylock(&sb->s_umount)) {
  3282. writeback_inodes_sb_nr(sb, nr_pages, reason);
  3283. up_read(&sb->s_umount);
  3284. return 1;
  3285. }
  3286. return 0;
  3287. }
  3288. void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
  3289. unsigned long nr_pages)
  3290. {
  3291. struct super_block *sb = root->fs_info->sb;
  3292. int started;
3293. /* If we cannot start writeback, just sync all the delalloc files. */
  3294. started = writeback_inodes_sb_nr_if_idle_safe(sb, nr_pages,
  3295. WB_REASON_FS_FREE_SPACE);
  3296. if (!started) {
  3297. /*
3298. * We needn't worry about the filesystem going from r/w to r/o
3299. * even though we don't acquire the ->s_umount mutex, because
3300. * the filesystem should guarantee that the delalloc inode list
3301. * is empty once the filesystem is read-only (all dirty pages
3302. * have been written to disk).
  3303. */
  3304. btrfs_start_delalloc_inodes(root, 0);
  3305. btrfs_wait_ordered_extents(root, 0);
  3306. }
  3307. }
  3308. /*
  3309. * shrink metadata reservation for delalloc
  3310. */
  3311. static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
  3312. bool wait_ordered)
  3313. {
  3314. struct btrfs_block_rsv *block_rsv;
  3315. struct btrfs_space_info *space_info;
  3316. struct btrfs_trans_handle *trans;
  3317. u64 delalloc_bytes;
  3318. u64 max_reclaim;
  3319. long time_left;
  3320. unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
  3321. int loops = 0;
  3322. enum btrfs_reserve_flush_enum flush;
  3323. trans = (struct btrfs_trans_handle *)current->journal_info;
  3324. block_rsv = &root->fs_info->delalloc_block_rsv;
  3325. space_info = block_rsv->space_info;
  3326. smp_mb();
  3327. delalloc_bytes = percpu_counter_sum_positive(
  3328. &root->fs_info->delalloc_bytes);
  3329. if (delalloc_bytes == 0) {
  3330. if (trans)
  3331. return;
  3332. btrfs_wait_ordered_extents(root, 0);
  3333. return;
  3334. }
  3335. while (delalloc_bytes && loops < 3) {
  3336. max_reclaim = min(delalloc_bytes, to_reclaim);
  3337. nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
  3338. btrfs_writeback_inodes_sb_nr(root, nr_pages);
  3339. /*
  3340. * We need to wait for the async pages to actually start before
  3341. * we do anything.
  3342. */
  3343. wait_event(root->fs_info->async_submit_wait,
  3344. !atomic_read(&root->fs_info->async_delalloc_pages));
  3345. if (!trans)
  3346. flush = BTRFS_RESERVE_FLUSH_ALL;
  3347. else
  3348. flush = BTRFS_RESERVE_NO_FLUSH;
  3349. spin_lock(&space_info->lock);
  3350. if (can_overcommit(root, space_info, orig, flush)) {
  3351. spin_unlock(&space_info->lock);
  3352. break;
  3353. }
  3354. spin_unlock(&space_info->lock);
  3355. loops++;
  3356. if (wait_ordered && !trans) {
  3357. btrfs_wait_ordered_extents(root, 0);
  3358. } else {
  3359. time_left = schedule_timeout_killable(1);
  3360. if (time_left)
  3361. break;
  3362. }
  3363. smp_mb();
  3364. delalloc_bytes = percpu_counter_sum_positive(
  3365. &root->fs_info->delalloc_bytes);
  3366. }
  3367. }
  3368. /**
3369. * may_commit_transaction - possibly commit the transaction if it's ok to
  3370. * @root - the root we're allocating for
  3371. * @bytes - the number of bytes we want to reserve
  3372. * @force - force the commit
  3373. *
  3374. * This will check to make sure that committing the transaction will actually
  3375. * get us somewhere and then commit the transaction if it does. Otherwise it
  3376. * will return -ENOSPC.
  3377. */
  3378. static int may_commit_transaction(struct btrfs_root *root,
  3379. struct btrfs_space_info *space_info,
  3380. u64 bytes, int force)
  3381. {
  3382. struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
  3383. struct btrfs_trans_handle *trans;
  3384. trans = (struct btrfs_trans_handle *)current->journal_info;
  3385. if (trans)
  3386. return -EAGAIN;
  3387. if (force)
  3388. goto commit;
  3389. /* See if there is enough pinned space to make this reservation */
  3390. spin_lock(&space_info->lock);
  3391. if (space_info->bytes_pinned >= bytes) {
  3392. spin_unlock(&space_info->lock);
  3393. goto commit;
  3394. }
  3395. spin_unlock(&space_info->lock);
  3396. /*
  3397. * See if there is some space in the delayed insertion reservation for
  3398. * this reservation.
  3399. */
  3400. if (space_info != delayed_rsv->space_info)
  3401. return -ENOSPC;
  3402. spin_lock(&space_info->lock);
  3403. spin_lock(&delayed_rsv->lock);
  3404. if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
  3405. spin_unlock(&delayed_rsv->lock);
  3406. spin_unlock(&space_info->lock);
  3407. return -ENOSPC;
  3408. }
  3409. spin_unlock(&delayed_rsv->lock);
  3410. spin_unlock(&space_info->lock);
  3411. commit:
  3412. trans = btrfs_join_transaction(root);
  3413. if (IS_ERR(trans))
  3414. return -ENOSPC;
  3415. return btrfs_commit_transaction(trans, root);
  3416. }
  3417. enum flush_state {
  3418. FLUSH_DELAYED_ITEMS_NR = 1,
  3419. FLUSH_DELAYED_ITEMS = 2,
  3420. FLUSH_DELALLOC = 3,
  3421. FLUSH_DELALLOC_WAIT = 4,
  3422. ALLOC_CHUNK = 5,
  3423. COMMIT_TRANS = 6,
  3424. };
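/*
 * Run one state of the flush state machine above to reclaim metadata
 * space: run some or all delayed items, flush (and optionally wait on)
 * delalloc, allocate a new chunk, or commit the transaction.
 */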
  3425. static int flush_space(struct btrfs_root *root,
  3426. struct btrfs_space_info *space_info, u64 num_bytes,
  3427. u64 orig_bytes, int state)
  3428. {
  3429. struct btrfs_trans_handle *trans;
  3430. int nr;
  3431. int ret = 0;
  3432. switch (state) {
  3433. case FLUSH_DELAYED_ITEMS_NR:
  3434. case FLUSH_DELAYED_ITEMS:
  3435. if (state == FLUSH_DELAYED_ITEMS_NR) {
  3436. u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
  3437. nr = (int)div64_u64(num_bytes, bytes);
  3438. if (!nr)
  3439. nr = 1;
  3440. nr *= 2;
  3441. } else {
  3442. nr = -1;
  3443. }
  3444. trans = btrfs_join_transaction(root);
  3445. if (IS_ERR(trans)) {
  3446. ret = PTR_ERR(trans);
  3447. break;
  3448. }
  3449. ret = btrfs_run_delayed_items_nr(trans, root, nr);
  3450. btrfs_end_transaction(trans, root);
  3451. break;
  3452. case FLUSH_DELALLOC:
  3453. case FLUSH_DELALLOC_WAIT:
  3454. shrink_delalloc(root, num_bytes, orig_bytes,
  3455. state == FLUSH_DELALLOC_WAIT);
  3456. break;
  3457. case ALLOC_CHUNK:
  3458. trans = btrfs_join_transaction(root);
  3459. if (IS_ERR(trans)) {
  3460. ret = PTR_ERR(trans);
  3461. break;
  3462. }
  3463. ret = do_chunk_alloc(trans, root->fs_info->extent_root,
  3464. btrfs_get_alloc_profile(root, 0),
  3465. CHUNK_ALLOC_NO_FORCE);
  3466. btrfs_end_transaction(trans, root);
  3467. if (ret == -ENOSPC)
  3468. ret = 0;
  3469. break;
  3470. case COMMIT_TRANS:
  3471. ret = may_commit_transaction(root, space_info, orig_bytes, 0);
  3472. break;
  3473. default:
  3474. ret = -ENOSPC;
  3475. break;
  3476. }
  3477. return ret;
  3478. }
  3479. /**
  3480. * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
  3481. * @root - the root we're allocating for
  3482. * @block_rsv - the block_rsv we're allocating for
  3483. * @orig_bytes - the number of bytes we want
3484. * @flush - whether or not we can flush to make our reservation
  3485. *
3486. * This will reserve orig_bytes number of bytes from the space info associated
  3487. * with the block_rsv. If there is not enough space it will make an attempt to
  3488. * flush out space to make room. It will do this by flushing delalloc if
  3489. * possible or committing the transaction. If flush is 0 then no attempts to
  3490. * regain reservations will be made and this will fail if there is not enough
  3491. * space already.
  3492. */
  3493. static int reserve_metadata_bytes(struct btrfs_root *root,
  3494. struct btrfs_block_rsv *block_rsv,
  3495. u64 orig_bytes,
  3496. enum btrfs_reserve_flush_enum flush)
  3497. {
  3498. struct btrfs_space_info *space_info = block_rsv->space_info;
  3499. u64 used;
  3500. u64 num_bytes = orig_bytes;
  3501. int flush_state = FLUSH_DELAYED_ITEMS_NR;
  3502. int ret = 0;
  3503. bool flushing = false;
  3504. again:
  3505. ret = 0;
  3506. spin_lock(&space_info->lock);
  3507. /*
  3508. * We only want to wait if somebody other than us is flushing and we
  3509. * are actually allowed to flush all things.
  3510. */
  3511. while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
  3512. space_info->flush) {
  3513. spin_unlock(&space_info->lock);
  3514. /*
  3515. * If we have a trans handle we can't wait because the flusher
3516. * may have to commit the transaction, which would mean we'd
3517. * deadlock: we'd be waiting for the flusher to finish while
3518. * holding the current transaction open.
  3519. */
  3520. if (current->journal_info)
  3521. return -EAGAIN;
  3522. ret = wait_event_killable(space_info->wait, !space_info->flush);
  3523. /* Must have been killed, return */
  3524. if (ret)
  3525. return -EINTR;
  3526. spin_lock(&space_info->lock);
  3527. }
  3528. ret = -ENOSPC;
  3529. used = space_info->bytes_used + space_info->bytes_reserved +
  3530. space_info->bytes_pinned + space_info->bytes_readonly +
  3531. space_info->bytes_may_use;
  3532. /*
3533. * The idea here is that if we've not already over-reserved the block
3534. * group then we can go ahead and save our reservation first and then
3535. * start flushing if we need to. Otherwise, if we've already
3536. * overcommitted, let's start flushing stuff first and then come back
3537. * and try to make our reservation.
  3538. */
  3539. if (used <= space_info->total_bytes) {
  3540. if (used + orig_bytes <= space_info->total_bytes) {
  3541. space_info->bytes_may_use += orig_bytes;
  3542. trace_btrfs_space_reservation(root->fs_info,
  3543. "space_info", space_info->flags, orig_bytes, 1);
  3544. ret = 0;
  3545. } else {
  3546. /*
3547. * Ok, set num_bytes to orig_bytes since we aren't
3548. * overcommitted; this way we only try and reclaim what
3549. * we need.
  3550. */
  3551. num_bytes = orig_bytes;
  3552. }
  3553. } else {
  3554. /*
  3555. * Ok we're over committed, set num_bytes to the overcommitted
  3556. * amount plus the amount of bytes that we need for this
  3557. * reservation.
  3558. */
  3559. num_bytes = used - space_info->total_bytes +
  3560. (orig_bytes * 2);
  3561. }
  3562. if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
  3563. space_info->bytes_may_use += orig_bytes;
  3564. trace_btrfs_space_reservation(root->fs_info, "space_info",
  3565. space_info->flags, orig_bytes,
  3566. 1);
  3567. ret = 0;
  3568. }
  3569. /*
3570. * Couldn't make our reservation; save our place so that while we're
3571. * trying to reclaim space we can actually use it instead of somebody
3572. * else stealing it from us.
  3573. *
  3574. * We make the other tasks wait for the flush only when we can flush
  3575. * all things.
  3576. */
  3577. if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
  3578. flushing = true;
  3579. space_info->flush = 1;
  3580. }
  3581. spin_unlock(&space_info->lock);
  3582. if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
  3583. goto out;
  3584. ret = flush_space(root, space_info, num_bytes, orig_bytes,
  3585. flush_state);
  3586. flush_state++;
  3587. /*
3588. * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
3589. * would happen, so skip the delalloc flush states.
  3590. */
  3591. if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
  3592. (flush_state == FLUSH_DELALLOC ||
  3593. flush_state == FLUSH_DELALLOC_WAIT))
  3594. flush_state = ALLOC_CHUNK;
  3595. if (!ret)
  3596. goto again;
  3597. else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
  3598. flush_state < COMMIT_TRANS)
  3599. goto again;
  3600. else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
  3601. flush_state <= COMMIT_TRANS)
  3602. goto again;
  3603. out:
  3604. if (flushing) {
  3605. spin_lock(&space_info->lock);
  3606. space_info->flush = 0;
  3607. wake_up_all(&space_info->wait);
  3608. spin_unlock(&space_info->lock);
  3609. }
  3610. return ret;
  3611. }
  3612. static struct btrfs_block_rsv *get_block_rsv(
  3613. const struct btrfs_trans_handle *trans,
  3614. const struct btrfs_root *root)
  3615. {
  3616. struct btrfs_block_rsv *block_rsv = NULL;
  3617. if (root->ref_cows)
  3618. block_rsv = trans->block_rsv;
  3619. if (root == root->fs_info->csum_root && trans->adding_csums)
  3620. block_rsv = trans->block_rsv;
  3621. if (!block_rsv)
  3622. block_rsv = root->block_rsv;
  3623. if (!block_rsv)
  3624. block_rsv = &root->fs_info->empty_block_rsv;
  3625. return block_rsv;
  3626. }
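/*
 * Take @num_bytes out of the rsv's reserved space; fails with -ENOSPC
 * if the rsv does not hold that much.
 */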
  3627. static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
  3628. u64 num_bytes)
  3629. {
  3630. int ret = -ENOSPC;
  3631. spin_lock(&block_rsv->lock);
  3632. if (block_rsv->reserved >= num_bytes) {
  3633. block_rsv->reserved -= num_bytes;
  3634. if (block_rsv->reserved < block_rsv->size)
  3635. block_rsv->full = 0;
  3636. ret = 0;
  3637. }
  3638. spin_unlock(&block_rsv->lock);
  3639. return ret;
  3640. }
  3641. static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
  3642. u64 num_bytes, int update_size)
  3643. {
  3644. spin_lock(&block_rsv->lock);
  3645. block_rsv->reserved += num_bytes;
  3646. if (update_size)
  3647. block_rsv->size += num_bytes;
  3648. else if (block_rsv->reserved >= block_rsv->size)
  3649. block_rsv->full = 1;
  3650. spin_unlock(&block_rsv->lock);
  3651. }
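/*
 * Shrink @block_rsv by @num_bytes ((u64)-1 releases everything) and
 * hand any now-excess reserved space to @dest, or back to the
 * space_info's bytes_may_use when @dest is NULL or already full.
 */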
  3652. static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
  3653. struct btrfs_block_rsv *block_rsv,
  3654. struct btrfs_block_rsv *dest, u64 num_bytes)
  3655. {
  3656. struct btrfs_space_info *space_info = block_rsv->space_info;
  3657. spin_lock(&block_rsv->lock);
  3658. if (num_bytes == (u64)-1)
  3659. num_bytes = block_rsv->size;
  3660. block_rsv->size -= num_bytes;
  3661. if (block_rsv->reserved >= block_rsv->size) {
  3662. num_bytes = block_rsv->reserved - block_rsv->size;
  3663. block_rsv->reserved = block_rsv->size;
  3664. block_rsv->full = 1;
  3665. } else {
  3666. num_bytes = 0;
  3667. }
  3668. spin_unlock(&block_rsv->lock);
  3669. if (num_bytes > 0) {
  3670. if (dest) {
  3671. spin_lock(&dest->lock);
  3672. if (!dest->full) {
  3673. u64 bytes_to_add;
  3674. bytes_to_add = dest->size - dest->reserved;
  3675. bytes_to_add = min(num_bytes, bytes_to_add);
  3676. dest->reserved += bytes_to_add;
  3677. if (dest->reserved >= dest->size)
  3678. dest->full = 1;
  3679. num_bytes -= bytes_to_add;
  3680. }
  3681. spin_unlock(&dest->lock);
  3682. }
  3683. if (num_bytes) {
  3684. spin_lock(&space_info->lock);
  3685. space_info->bytes_may_use -= num_bytes;
  3686. trace_btrfs_space_reservation(fs_info, "space_info",
  3687. space_info->flags, num_bytes, 0);
  3688. space_info->reservation_progress++;
  3689. spin_unlock(&space_info->lock);
  3690. }
  3691. }
  3692. }
  3693. static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
  3694. struct btrfs_block_rsv *dst, u64 num_bytes)
  3695. {
  3696. int ret;
  3697. ret = block_rsv_use_bytes(src, num_bytes);
  3698. if (ret)
  3699. return ret;
  3700. block_rsv_add_bytes(dst, num_bytes, 1);
  3701. return 0;
  3702. }
  3703. void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
  3704. {
  3705. memset(rsv, 0, sizeof(*rsv));
  3706. spin_lock_init(&rsv->lock);
  3707. rsv->type = type;
  3708. }
  3709. struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
  3710. unsigned short type)
  3711. {
  3712. struct btrfs_block_rsv *block_rsv;
  3713. struct btrfs_fs_info *fs_info = root->fs_info;
  3714. block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
  3715. if (!block_rsv)
  3716. return NULL;
  3717. btrfs_init_block_rsv(block_rsv, type);
  3718. block_rsv->space_info = __find_space_info(fs_info,
  3719. BTRFS_BLOCK_GROUP_METADATA);
  3720. return block_rsv;
  3721. }
  3722. void btrfs_free_block_rsv(struct btrfs_root *root,
  3723. struct btrfs_block_rsv *rsv)
  3724. {
  3725. if (!rsv)
  3726. return;
  3727. btrfs_block_rsv_release(root, rsv, (u64)-1);
  3728. kfree(rsv);
  3729. }
  3730. int btrfs_block_rsv_add(struct btrfs_root *root,
  3731. struct btrfs_block_rsv *block_rsv, u64 num_bytes,
  3732. enum btrfs_reserve_flush_enum flush)
  3733. {
  3734. int ret;
  3735. if (num_bytes == 0)
  3736. return 0;
  3737. ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
  3738. if (!ret) {
  3739. block_rsv_add_bytes(block_rsv, num_bytes, 1);
  3740. return 0;
  3741. }
  3742. return ret;
  3743. }
  3744. int btrfs_block_rsv_check(struct btrfs_root *root,
  3745. struct btrfs_block_rsv *block_rsv, int min_factor)
  3746. {
  3747. u64 num_bytes = 0;
  3748. int ret = -ENOSPC;
  3749. if (!block_rsv)
  3750. return 0;
  3751. spin_lock(&block_rsv->lock);
  3752. num_bytes = div_factor(block_rsv->size, min_factor);
  3753. if (block_rsv->reserved >= num_bytes)
  3754. ret = 0;
  3755. spin_unlock(&block_rsv->lock);
  3756. return ret;
  3757. }
  3758. int btrfs_block_rsv_refill(struct btrfs_root *root,
  3759. struct btrfs_block_rsv *block_rsv, u64 min_reserved,
  3760. enum btrfs_reserve_flush_enum flush)
  3761. {
  3762. u64 num_bytes = 0;
  3763. int ret = -ENOSPC;
  3764. if (!block_rsv)
  3765. return 0;
  3766. spin_lock(&block_rsv->lock);
  3767. num_bytes = min_reserved;
  3768. if (block_rsv->reserved >= num_bytes)
  3769. ret = 0;
  3770. else
  3771. num_bytes -= block_rsv->reserved;
  3772. spin_unlock(&block_rsv->lock);
  3773. if (!ret)
  3774. return 0;
  3775. ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
  3776. if (!ret) {
  3777. block_rsv_add_bytes(block_rsv, num_bytes, 0);
  3778. return 0;
  3779. }
  3780. return ret;
  3781. }
  3782. int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
  3783. struct btrfs_block_rsv *dst_rsv,
  3784. u64 num_bytes)
  3785. {
  3786. return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
  3787. }
  3788. void btrfs_block_rsv_release(struct btrfs_root *root,
  3789. struct btrfs_block_rsv *block_rsv,
  3790. u64 num_bytes)
  3791. {
  3792. struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
  3793. if (global_rsv->full || global_rsv == block_rsv ||
  3794. block_rsv->space_info != global_rsv->space_info)
  3795. global_rsv = NULL;
  3796. block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
  3797. num_bytes);
  3798. }
  3799. /*
  3800. * helper to calculate size of global block reservation.
  3801. * the desired value is sum of space used by extent tree,
  3802. * checksum tree and root tree
  3803. */
  3804. static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
  3805. {
  3806. struct btrfs_space_info *sinfo;
  3807. u64 num_bytes;
  3808. u64 meta_used;
  3809. u64 data_used;
  3810. int csum_size = btrfs_super_csum_size(fs_info->super_copy);
  3811. sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
  3812. spin_lock(&sinfo->lock);
  3813. data_used = sinfo->bytes_used;
  3814. spin_unlock(&sinfo->lock);
  3815. sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
  3816. spin_lock(&sinfo->lock);
  3817. if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
  3818. data_used = 0;
  3819. meta_used = sinfo->bytes_used;
  3820. spin_unlock(&sinfo->lock);
  3821. num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
  3822. csum_size * 2;
  3823. num_bytes += div64_u64(data_used + meta_used, 50);
  3824. if (num_bytes * 3 > meta_used)
  3825. num_bytes = div64_u64(meta_used, 3);
  3826. return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
  3827. }
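/*
 * Recalculate the target size of the global block reserve and top up
 * its reserved bytes from whatever the metadata space_info has free,
 * trimming any excess so reserved never exceeds size.
 */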
  3828. static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
  3829. {
  3830. struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
  3831. struct btrfs_space_info *sinfo = block_rsv->space_info;
  3832. u64 num_bytes;
  3833. num_bytes = calc_global_metadata_size(fs_info);
  3834. spin_lock(&sinfo->lock);
  3835. spin_lock(&block_rsv->lock);
  3836. block_rsv->size = num_bytes;
  3837. num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
  3838. sinfo->bytes_reserved + sinfo->bytes_readonly +
  3839. sinfo->bytes_may_use;
  3840. if (sinfo->total_bytes > num_bytes) {
  3841. num_bytes = sinfo->total_bytes - num_bytes;
  3842. block_rsv->reserved += num_bytes;
  3843. sinfo->bytes_may_use += num_bytes;
  3844. trace_btrfs_space_reservation(fs_info, "space_info",
  3845. sinfo->flags, num_bytes, 1);
  3846. }
  3847. if (block_rsv->reserved >= block_rsv->size) {
  3848. num_bytes = block_rsv->reserved - block_rsv->size;
  3849. sinfo->bytes_may_use -= num_bytes;
  3850. trace_btrfs_space_reservation(fs_info, "space_info",
  3851. sinfo->flags, num_bytes, 0);
  3852. sinfo->reservation_progress++;
  3853. block_rsv->reserved = block_rsv->size;
  3854. block_rsv->full = 1;
  3855. }
  3856. spin_unlock(&block_rsv->lock);
  3857. spin_unlock(&sinfo->lock);
  3858. }
static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->delalloc_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	update_global_block_rsv(fs_info);
}
static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
				(u64)-1);
	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
}
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (!trans->block_rsv)
		return;

	if (!trans->bytes_reserved)
		return;

	trace_btrfs_space_reservation(root->fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
	trans->bytes_reserved = 0;
}
/* Can only return 0 or -ENOSPC */
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * We need to hold space in order to delete our orphan item once we've
	 * added it, so this takes the reservation so we can release it later
	 * when we are truly done with the orphan item.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 1);
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}
void btrfs_orphan_release_metadata(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 0);
	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}
int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;

	/*
	 * two for root back/forward refs, two for directory entries,
	 * one for root of the snapshot and one for parent inode.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 6);

	dst_rsv->space_info = src_rsv->space_info;
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}
/**
 * drop_outstanding_extent - drop an outstanding extent
 * @inode: the inode we're dropping the extent for
 *
 * This is called when we are freeing up an outstanding extent, either called
 * after an error or after an extent is written. This will return the number of
 * reserved extents that need to be freed. This must be called with
 * BTRFS_I(inode)->lock held.
 */
static unsigned drop_outstanding_extent(struct inode *inode)
{
	unsigned drop_inode_space = 0;
	unsigned dropped_extents = 0;

	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
	BTRFS_I(inode)->outstanding_extents--;

	if (BTRFS_I(inode)->outstanding_extents == 0 &&
	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			       &BTRFS_I(inode)->runtime_flags))
		drop_inode_space = 1;

	/*
	 * If we have as many or more outstanding extents than we have
	 * reserved then we need to leave the reserved extents count alone.
	 */
	if (BTRFS_I(inode)->outstanding_extents >=
	    BTRFS_I(inode)->reserved_extents)
		return drop_inode_space;

	dropped_extents = BTRFS_I(inode)->reserved_extents -
		BTRFS_I(inode)->outstanding_extents;
	BTRFS_I(inode)->reserved_extents -= dropped_extents;
	return dropped_extents + drop_inode_space;
}
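
/*
 * Worked example (an added illustration, not from the original source):
 * with outstanding_extents = 3 and reserved_extents = 5, dropping one
 * extent leaves outstanding_extents = 2, so dropped_extents = 5 - 2 = 3
 * and reserved_extents becomes 2; the caller then frees the metadata held
 * for those 3 extents. The extra inode-update reservation is only handed
 * back once outstanding_extents reaches zero and the
 * BTRFS_INODE_DELALLOC_META_RESERVED bit is cleared.
 */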
/**
 * calc_csum_metadata_size - return the amount of metadata space that must be
 * reserved/freed for the given bytes.
 * @inode: the inode we're manipulating
 * @num_bytes: the number of bytes in question
 * @reserve: 1 if we are reserving space, 0 if we are freeing space
 *
 * This adjusts the number of csum_bytes in the inode and then returns the
 * correct amount of metadata that must either be reserved or freed. We
 * calculate how many checksums we can fit into one leaf and then divide the
 * number of bytes that will need to be checksummed by this value to figure
 * out how many checksums will be required. If we are adding bytes then the
 * number may go up and we will return the number of additional bytes that
 * must be reserved. If it is going down we will return the number of bytes
 * that must be freed.
 *
 * This must be called with BTRFS_I(inode)->lock held.
 */
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
				   int reserve)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 csum_size;
	int num_csums_per_leaf;
	int num_csums;
	int old_csums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
	    BTRFS_I(inode)->csum_bytes == 0)
		return 0;

	old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	if (reserve)
		BTRFS_I(inode)->csum_bytes += num_bytes;
	else
		BTRFS_I(inode)->csum_bytes -= num_bytes;
	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
	num_csums_per_leaf = (int)div64_u64(csum_size,
					    sizeof(struct btrfs_csum_item) +
					    sizeof(struct btrfs_disk_key));
	num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	num_csums = num_csums + num_csums_per_leaf - 1;
	num_csums = num_csums / num_csums_per_leaf;

	old_csums = old_csums + num_csums_per_leaf - 1;
	old_csums = old_csums / num_csums_per_leaf;

	/* No change, no need to reserve more */
	if (old_csums == num_csums)
		return 0;

	if (reserve)
		return btrfs_calc_trans_metadata_size(root,
						      num_csums - old_csums);

	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
}
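
/*
 * Worked example (an added illustration; the per-leaf count of ~100 csum
 * items and the 4KiB sectorsize are assumptions, not values from this
 * file): an inode with csum_bytes = 1MiB needs 1MiB / 4KiB = 256 csums,
 * i.e. ceil(256 / 100) = 3 leaves. Reserving another 1MiB doubles that to
 * 512 csums, i.e. 6 leaves, so the function returns the metadata cost of
 * 6 - 3 = 3 additional leaves via btrfs_calc_trans_metadata_size().
 */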
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
	u64 to_reserve = 0;
	u64 csum_bytes;
	unsigned nr_extents = 0;
	int extra_reserve = 0;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
	int ret = 0;
	bool delalloc_lock = true;

	/*
	 * If we are a free space inode we need to not flush since we will be
	 * in the middle of a transaction commit. We also don't need the
	 * delalloc mutex since we won't race with anybody. We need this
	 * mostly to make lockdep shut its filthy mouth.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		flush = BTRFS_RESERVE_NO_FLUSH;
		delalloc_lock = false;
	}

	if (flush != BTRFS_RESERVE_NO_FLUSH &&
	    btrfs_transaction_in_commit(root->fs_info))
		schedule_timeout(1);

	if (delalloc_lock)
		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);

	num_bytes = ALIGN(num_bytes, root->sectorsize);

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;

	if (BTRFS_I(inode)->outstanding_extents >
	    BTRFS_I(inode)->reserved_extents)
		nr_extents = BTRFS_I(inode)->outstanding_extents -
			BTRFS_I(inode)->reserved_extents;

	/*
	 * Add an item to reserve for updating the inode when we complete the
	 * delalloc io.
	 */
	if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
		      &BTRFS_I(inode)->runtime_flags)) {
		nr_extents++;
		extra_reserve = 1;
	}

	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
	csum_bytes = BTRFS_I(inode)->csum_bytes;
	spin_unlock(&BTRFS_I(inode)->lock);

	if (root->fs_info->quota_enabled)
		ret = btrfs_qgroup_reserve(root, num_bytes +
					   nr_extents * root->leafsize);

	/*
	 * ret != 0 here means the qgroup reservation failed, we go straight
	 * to the shared error handling then.
	 */
	if (ret == 0)
		ret = reserve_metadata_bytes(root, block_rsv,
					     to_reserve, flush);

	if (ret) {
		u64 to_free = 0;
		unsigned dropped;

		spin_lock(&BTRFS_I(inode)->lock);
		dropped = drop_outstanding_extent(inode);
		/*
		 * If the inode's csum_bytes is the same as the original
		 * csum_bytes then we know we haven't raced with any free()ers
		 * so we can just reduce our inode's csum bytes and carry on.
		 * Otherwise we have to do the normal free thing to account
		 * for the case that the free side didn't free up its reserve
		 * because of this outstanding reservation.
		 */
		if (BTRFS_I(inode)->csum_bytes == csum_bytes)
			calc_csum_metadata_size(inode, num_bytes, 0);
		else
			to_free = calc_csum_metadata_size(inode, num_bytes, 0);
		spin_unlock(&BTRFS_I(inode)->lock);
		if (dropped)
			to_free += btrfs_calc_trans_metadata_size(root, dropped);

		if (to_free) {
			btrfs_block_rsv_release(root, block_rsv, to_free);
			trace_btrfs_space_reservation(root->fs_info,
						      "delalloc",
						      btrfs_ino(inode),
						      to_free, 0);
		}
		if (root->fs_info->quota_enabled) {
			btrfs_qgroup_free(root, num_bytes +
					  nr_extents * root->leafsize);
		}
		if (delalloc_lock)
			mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
		return ret;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	if (extra_reserve) {
		set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			&BTRFS_I(inode)->runtime_flags);
		nr_extents--;
	}
	BTRFS_I(inode)->reserved_extents += nr_extents;
	spin_unlock(&BTRFS_I(inode)->lock);

	if (delalloc_lock)
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);

	if (to_reserve)
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), to_reserve, 1);
	block_rsv_add_bytes(block_rsv, to_reserve, 1);

	return 0;
}
/**
 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
 * @inode: the inode to release the reservation for
 * @num_bytes: the number of bytes we're releasing
 *
 * This will release the metadata reservation for an inode. This can be called
 * once we complete IO for a given set of bytes to release their metadata
 * reservations.
 */
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 to_free = 0;
	unsigned dropped;

	num_bytes = ALIGN(num_bytes, root->sectorsize);
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode);

	to_free = calc_csum_metadata_size(inode, num_bytes, 0);
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped > 0)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	trace_btrfs_space_reservation(root->fs_info, "delalloc",
				      btrfs_ino(inode), to_free, 0);
	if (root->fs_info->quota_enabled) {
		btrfs_qgroup_free(root, num_bytes +
					dropped * root->leafsize);
	}

	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
				to_free);
}
/**
 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
 * @inode: inode we're writing to
 * @num_bytes: the number of bytes we want to allocate
 *
 * This will do the following things
 *
 * o reserve space in the data space info for num_bytes
 * o reserve space in the metadata space info based on number of outstanding
 *   extents and how much csums will be needed
 * o add to the inodes ->delalloc_bytes
 * o add it to the fs_info's delalloc inodes list.
 *
 * This will return 0 for success and -ENOSPC if there is no space left.
 */
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, num_bytes);
	if (ret)
		return ret;

	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
	if (ret) {
		btrfs_free_reserved_data_space(inode, num_bytes);
		return ret;
	}

	return 0;
}
/**
 * btrfs_delalloc_release_space - release data and metadata space for delalloc
 * @inode: inode we're releasing space for
 * @num_bytes: the number of bytes we want to free up
 *
 * This must be matched with a call to btrfs_delalloc_reserve_space. This is
 * called in the case that we don't need the metadata AND data reservations
 * anymore, e.g. if there is an error or we insert an inline extent.
 *
 * This function will release the metadata space that was not used and will
 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
 * list if there are no delalloc bytes left.
 */
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
{
	btrfs_delalloc_release_metadata(inode, num_bytes);
	btrfs_free_reserved_data_space(inode, num_bytes);
}
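
/*
 * Typical pairing (an added sketch, not from the original file;
 * do_the_write() is a hypothetical helper standing in for a real write
 * path): a caller reserves data + metadata up front, releases both on
 * failure, and on success releases only the metadata once IO completes,
 * as the docs above describe:
 *
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;	// -ENOSPC, nothing was reserved
 *	ret = do_the_write(inode, num_bytes);
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, num_bytes);
 *	else
 *		btrfs_delalloc_release_metadata(inode, num_bytes);
 */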
static int update_block_group(struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;

	/* block accounting for super block */
	spin_lock(&info->delalloc_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -ENOENT;
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space. This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
			cache_block_group(cache, 1);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_test_opt(root, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		cache->dirty = 1;
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}
		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}
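
/*
 * Illustrative accounting (an added note, not from the original source):
 * allocating 1MiB in a RAID1 or DUP block group (factor = 2) moves 1MiB
 * from ->bytes_reserved to ->bytes_used but adds 2MiB to ->disk_used,
 * since each logical byte occupies two bytes on disk; a SINGLE or RAID0
 * group uses factor = 1 and the two counters move in lockstep.
 */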
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	struct btrfs_block_group_cache *cache;
	u64 bytenr;

	spin_lock(&root->fs_info->block_group_cache_lock);
	bytenr = root->fs_info->first_logical_byte;
	spin_unlock(&root->fs_info->block_group_cache_lock);

	if (bytenr < (u64)-1)
		return bytenr;

	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->key.objectid;
	btrfs_put_block_group(cache);
	return bytenr;
}
static int pin_down_extent(struct btrfs_root *root,
			   struct btrfs_block_group_cache *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	return 0;
}
/*
 * this function must be called within transaction
 */
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	BUG_ON(!cache); /* Logic error */

	pin_down_extent(root, cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
	return 0;
}
/*
 * this function must be called within transaction
 */
int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
				    u64 bytenr, u64 num_bytes)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	BUG_ON(!cache); /* Logic error */

	/*
	 * pull in the free space cache (if any) so that our pin
	 * removes the free space from the cache. We have load_only set
	 * to one because the slow code to read in the free extents does check
	 * the pinned extents.
	 */
	cache_block_group(cache, 1);

	pin_down_extent(root, cache, bytenr, num_bytes, 0);

	/* remove us from the free space cache (if we're there at all) */
	btrfs_remove_free_space(cache, bytenr, num_bytes);
	btrfs_put_block_group(cache);
	return 0;
}
/**
 * btrfs_update_reserved_bytes - update the block_group and space info counters
 * @cache: The cache we are manipulating
 * @num_bytes: The number of bytes in question
 * @reserve: One of the reservation enums
 *
 * This is called by the allocator when it reserves space, or by somebody who is
 * freeing space that was never actually used on disk. For example if you
 * reserve some space for a new leaf in transaction A and before transaction A
 * commits you free that leaf, you call this with reserve set to 0 in order to
 * clear the reservation.
 *
 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
 * ENOSPC accounting. For data we handle the reservation through clearing the
 * delalloc bits in the io_tree. We have to do this since we could end up
 * allocating less disk space for the amount of data we have reserved in the
 * case of compression.
 *
 * If this is a reservation and the block group has become read only we cannot
 * make the reservation and return -EAGAIN, otherwise this function always
 * succeeds.
 */
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (reserve != RESERVE_FREE) {
		if (cache->ro) {
			ret = -EAGAIN;
		} else {
			cache->reserved += num_bytes;
			space_info->bytes_reserved += num_bytes;
			if (reserve == RESERVE_ALLOC) {
				trace_btrfs_space_reservation(cache->fs_info,
						"space_info", space_info->flags,
						num_bytes, 0);
				space_info->bytes_may_use -= num_bytes;
			}
		}
	} else {
		if (cache->ro)
			space_info->bytes_readonly += num_bytes;
		cache->reserved -= num_bytes;
		space_info->bytes_reserved -= num_bytes;
		space_info->reservation_progress++;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}
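
/*
 * Illustrative transitions (an added sketch, not from the original
 * source): per the code above, RESERVE_ALLOC moves num_bytes from
 * bytes_may_use to bytes_reserved (the ENOSPC accounting path),
 * RESERVE_ALLOC_NO_ACCOUNT only bumps bytes_reserved, and RESERVE_FREE
 * undoes a reservation. A typical allocator-side pairing:
 *
 *	ret = btrfs_update_reserved_bytes(cache, len, RESERVE_ALLOC);
 *	if (ret == -EAGAIN)	// block group went read-only, try elsewhere
 *		...
 *	...
 *	// the leaf was freed before the transaction committed:
 *	btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
 */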
void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;

	down_write(&fs_info->extent_commit_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->extent_commit_sem);

	update_global_block_rsv(fs_info);
}
static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 len;
	bool readonly;

	while (start <= end) {
		readonly = false;
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache); /* Logic error */
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			btrfs_add_free_space(cache, start, len);
		}

		start += len;
		space_info = cache->space_info;

		spin_lock(&space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		space_info->bytes_pinned -= len;
		if (cache->ro) {
			space_info->bytes_readonly += len;
			readonly = true;
		}
		spin_unlock(&cache->lock);
		if (!readonly && global_rsv->space_info == space_info) {
			spin_lock(&global_rsv->lock);
			if (!global_rsv->full) {
				len = min(len, global_rsv->size -
					  global_rsv->reserved);
				global_rsv->reserved += len;
				space_info->bytes_may_use += len;
				if (global_rsv->reserved >= global_rsv->size)
					global_rsv->full = 1;
			}
			spin_unlock(&global_rsv->lock);
		}
		spin_unlock(&space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;

	if (trans->aborted)
		return 0;

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			break;

		if (btrfs_test_opt(root, DISCARD))
			ret = btrfs_discard_extent(root, start,
						   end + 1 - start, NULL);

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		unpin_extent_range(root, start, end);
		cond_resched();
	}

	return 0;
}
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	ret = lookup_extent_backref(trans, extent_root, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
		if (!found_extent) {
			BUG_ON(iref);
			ret = remove_extent_backref(trans, extent_root, path,
						    NULL, refs_to_drop,
						    is_data);
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
			btrfs_release_path(path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret) {
				printk(KERN_ERR "umm, got %d back from search"
				       ", was looking for %llu\n", ret,
				       (unsigned long long)bytenr);
				if (ret > 0)
					btrfs_print_leaf(extent_root,
							 path->nodes[0]);
			}
			if (ret < 0) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
			extent_slot = path->slots[0];
		}
	} else if (ret == -ENOENT) {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
		       "parent %llu root %llu owner %llu offset %llu\n",
		       (unsigned long long)bytenr,
		       (unsigned long long)parent,
		       (unsigned long long)root_objectid,
		       (unsigned long long)owner_objectid,
		       (unsigned long long)owner_offset);
	} else {
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, extent_root, path,
					     owner_objectid, 0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}

		btrfs_release_path(path);
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
			printk(KERN_ERR "umm, got %d back from search"
			       ", was looking for %llu\n", ret,
			       (unsigned long long)bytenr);
			btrfs_print_leaf(extent_root, path->nodes[0]);
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}

		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs < refs_to_drop);
	refs -= refs_to_drop;

	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
						    iref, refs_to_drop,
						    is_data);
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
		}
	} else {
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(root, path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		if (ret) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}
		btrfs_release_path(path);

		if (is_data) {
			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
		}

		ret = update_block_group(root, bytenr, num_bytes, 0);
		if (ret) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * when we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well. This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	/* there are still entries for this ref, we can't drop it */
	if (ref->bytenr == bytenr)
		goto out;

	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
		btrfs_free_delayed_extent_op(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock. If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries. Go
	 * ahead and process it.
	 */
	head->node.in_tree = 0;
	rb_erase(&head->node.rb_node, &delayed_refs->root);

	delayed_refs->num_entries--;

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (list_empty(&head->cluster))
		delayed_refs->num_heads_ready--;

	list_del_init(&head->cluster);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref(&head->node);
	return ret;
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	struct btrfs_block_group_cache *cache = NULL;
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
					buf->start, buf->len,
					parent, root->root_key.objectid,
					btrfs_header_level(buf),
					BTRFS_DROP_DELAYED_REF, NULL, 0);
		BUG_ON(ret); /* -ENOMEM */
	}

	if (!last_ref)
		return;

	cache = btrfs_lookup_block_group(root->fs_info, buf->start);

	if (btrfs_header_generation(buf) == trans->transid) {
		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, root, buf->start);
			if (!ret)
				goto out;
		}

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(root, cache, buf->start, buf->len, 1);
			goto out;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
	}
out:
	/*
	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
	 * anymore.
	 */
	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
	btrfs_put_block_group(cache);
}
/* Can return -ENOMEM */
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
		      u64 owner, u64 offset, int for_cow)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(root, bytenr, num_bytes, 1);
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_DROP_DELAYED_REF, NULL, for_cow);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
						num_bytes,
						parent, root_objectid, owner,
						offset, BTRFS_DROP_DELAYED_REF,
						NULL, for_cow);
	}
	return ret;
}
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}
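
/*
 * Worked example (an added illustration with an assumed stripesize): with
 * stripesize = 64KiB, mask = 0xffff, so
 * stripe_align(root, 100000) = (100000 + 65535) & ~65535 = 131072, i.e.
 * the value rounds up to the next 64KiB boundary; already-aligned values
 * are returned unchanged.
 */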
/*
 * when we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once. So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes. Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
 */
static noinline int
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;
	DEFINE_WAIT(wait);

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	put_caching_control(caching_ctl);
	return 0;
}
static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;
	DEFINE_WAIT(wait);

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache));

	put_caching_control(caching_ctl);
	return 0;
}
int __get_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else
		return BTRFS_RAID_SINGLE;
}

static int get_block_group_index(struct btrfs_block_group_cache *cache)
{
	return __get_raid_index(cache->flags);
}

enum btrfs_loop_type {
	LOOP_CACHING_NOWAIT = 0,
	LOOP_CACHING_WAIT = 1,
	LOOP_ALLOC_CHUNK = 2,
	LOOP_NO_EMPTY_SIZE = 3,
};
/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static noinline int find_free_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 hint_byte, struct btrfs_key *ins,
				     u64 data)
{
	int ret = 0;
	struct btrfs_root *root = orig_root->fs_info->extent_root;
	struct btrfs_free_cluster *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group = NULL;
	struct btrfs_block_group_cache *used_block_group;
	u64 search_start = 0;
	int empty_cluster = 2 * 1024 * 1024;
	struct btrfs_space_info *space_info;
	int loop = 0;
	int index = __get_raid_index(data);
	int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
	bool found_uncached_bg = false;
	bool failed_cluster_refill = false;
	bool failed_alloc = false;
	bool use_cluster = true;
	bool have_caching_bg = false;

	WARN_ON(num_bytes < root->sectorsize);
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
	ins->objectid = 0;
	ins->offset = 0;

	trace_find_free_extent(orig_root, num_bytes, empty_size, data);

	space_info = __find_space_info(root->fs_info, data);
	if (!space_info) {
		printk(KERN_ERR "No space info for %llu\n", data);
		return -ENOSPC;
	}

	/*
	 * If the space info is for both data and metadata it means we have a
	 * small filesystem and we can't use the clustering stuff.
	 */
	if (btrfs_mixed_space_info(space_info))
		use_cluster = false;

	if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
		last_ptr = &root->fs_info->meta_alloc_cluster;
		if (!btrfs_test_opt(root, SSD))
			empty_cluster = 64 * 1024;
	}

	if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
	    btrfs_test_opt(root, SSD)) {
		last_ptr = &root->fs_info->data_alloc_cluster;
	}

	if (last_ptr) {
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		spin_unlock(&last_ptr->lock);
	}

	search_start = max(search_start, first_logical_byte(root, 0));
	search_start = max(search_start, hint_byte);

	if (!last_ptr)
		empty_cluster = 0;

	if (search_start == hint_byte) {
		block_group = btrfs_lookup_block_group(root->fs_info,
						       search_start);
		used_block_group = block_group;
		/*
		 * we don't want to use the block group if it doesn't match
		 * our allocation bits, or if it's not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is
		 * cached.
		 */
		if (block_group && block_group_bits(block_group, data) &&
		    block_group->cached != BTRFS_CACHE_NO) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				index = get_block_group_index(block_group);
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}
search:
	have_caching_bg = false;
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group, &space_info->block_groups[index],
			    list) {
		u64 offset;
		int cached;

		used_block_group = block_group;
		btrfs_get_block_group(block_group);
		search_start = block_group->key.objectid;

		/*
		 * this can happen if we end up cycling through all the
		 * raid types, but we want to make sure we only allocate
		 * for the proper type.
		 */
		if (!block_group_bits(block_group, data)) {
			u64 extra = BTRFS_BLOCK_GROUP_DUP |
				BTRFS_BLOCK_GROUP_RAID1 |
				BTRFS_BLOCK_GROUP_RAID10;

			/*
			 * if they asked for extra copies and this block group
			 * doesn't provide them, bail. This does allow us to
			 * fill raid0 from raid1.
			 */
			if ((data & extra) && !(block_group->flags & extra))
				goto loop;
		}

have_block_group:
		cached = block_group_cache_done(block_group);
		if (unlikely(!cached)) {
			found_uncached_bg = true;
			ret = cache_block_group(block_group, 0);
			BUG_ON(ret < 0);
			ret = 0;
		}

		if (unlikely(block_group->ro))
			goto loop;

		/*
		 * Ok we want to try and use the cluster allocator, so
		 * let's look there
		 */
		if (last_ptr) {
			/*
			 * the refill lock keeps out other
			 * people trying to start a new cluster
			 */
			spin_lock(&last_ptr->refill_lock);
			used_block_group = last_ptr->block_group;
			if (used_block_group != block_group &&
			    (!used_block_group ||
			     used_block_group->ro ||
			     !block_group_bits(used_block_group, data))) {
				used_block_group = block_group;
				goto refill_cluster;
			}

			if (used_block_group != block_group)
				btrfs_get_block_group(used_block_group);

			offset = btrfs_alloc_from_cluster(used_block_group,
			  last_ptr, num_bytes, used_block_group->key.objectid);
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
				trace_btrfs_reserve_extent_cluster(root,
					block_group, search_start, num_bytes);
				goto checks;
			}

			WARN_ON(last_ptr->block_group != used_block_group);
			if (used_block_group != block_group) {
				btrfs_put_block_group(used_block_group);
				used_block_group = block_group;
			}
refill_cluster:
			BUG_ON(used_block_group != block_group);
			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
			 * set up a new cluster, so let's just skip it
			 * and let the allocator find whatever block
			 * it can find. If we reach this point, we
			 * will have tried the cluster allocator
			 * plenty of times and not have found
			 * anything, so we are likely way too
			 * fragmented for the clustering stuff to find
			 * anything.
			 *
			 * However, if the cluster is taken from the
			 * current block group, release the cluster
			 * first, so that we stand a better chance of
			 * succeeding in the unclustered
			 * allocation.  */
			if (loop >= LOOP_NO_EMPTY_SIZE &&
			    last_ptr->block_group != block_group) {
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}

			/*
			 * this cluster didn't work out, free it and
			 * start over
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);

			if (loop >= LOOP_NO_EMPTY_SIZE) {
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}

			/* allocate a cluster in this block group */
			ret = btrfs_find_space_cluster(trans, root,
					       block_group, last_ptr,
					       search_start, num_bytes,
					       empty_cluster + empty_size);
			if (ret == 0) {
				/*
				 * now pull our allocation out of this
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
						  last_ptr, num_bytes,
						  search_start);
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
					trace_btrfs_reserve_extent_cluster(root,
						block_group, search_start,
						num_bytes);
					goto checks;
				}
			} else if (!cached && loop > LOOP_CACHING_NOWAIT
				   && !failed_cluster_refill) {
				spin_unlock(&last_ptr->refill_lock);

				failed_cluster_refill = true;
				wait_block_group_cache_progress(block_group,
				       num_bytes + empty_cluster + empty_size);
				goto have_block_group;
			}

			/*
			 * at this point we either didn't find a cluster
			 * or we weren't able to allocate a block from our
			 * cluster.  Free the cluster we've been trying
			 * to use, and go to the next block group
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);
			spin_unlock(&last_ptr->refill_lock);
			goto loop;
		}
unclustered_alloc:
		spin_lock(&block_group->free_space_ctl->tree_lock);
		if (cached &&
		    block_group->free_space_ctl->free_space <
		    num_bytes + empty_cluster + empty_size) {
			spin_unlock(&block_group->free_space_ctl->tree_lock);
			goto loop;
		}
		spin_unlock(&block_group->free_space_ctl->tree_lock);

		offset = btrfs_find_space_for_alloc(block_group, search_start,
						    num_bytes, empty_size);
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle
		 * of caching and we are ok with waiting, then go ahead and
		 * wait for progress to be made, and set failed_alloc to true.
		 *
		 * If failed_alloc is true then we've already waited on this
		 * block group once and should move on to the next block
		 * group.
		 */
		if (!offset && !failed_alloc && !cached &&
		    loop > LOOP_CACHING_NOWAIT) {
			wait_block_group_cache_progress(block_group,
						num_bytes + empty_size);
			failed_alloc = true;
			goto have_block_group;
		} else if (!offset) {
			if (!cached)
				have_caching_bg = true;
			goto loop;
		}
checks:
		search_start = stripe_align(root, offset);

		/* move on to the next group */
		if (search_start + num_bytes >
		    used_block_group->key.objectid + used_block_group->key.offset) {
			btrfs_add_free_space(used_block_group, offset, num_bytes);
			goto loop;
		}

		if (offset < search_start)
			btrfs_add_free_space(used_block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);

		ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
						  alloc_type);
		if (ret == -EAGAIN) {
			btrfs_add_free_space(used_block_group, offset, num_bytes);
			goto loop;
		}

		/* we are all good, let's return */
		ins->objectid = search_start;
		ins->offset = num_bytes;

		trace_btrfs_reserve_extent(orig_root, block_group,
					   search_start, num_bytes);
		if (used_block_group != block_group)
			btrfs_put_block_group(used_block_group);
		btrfs_put_block_group(block_group);
		break;
loop:
		failed_cluster_refill = false;
		failed_alloc = false;
		BUG_ON(index != get_block_group_index(block_group));
		if (used_block_group != block_group)
			btrfs_put_block_group(used_block_group);
		btrfs_put_block_group(block_group);
	}
	up_read(&space_info->groups_sem);

	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
		goto search;

	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
		goto search;

	/*
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *			again
	 */
	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
		index = 0;
		loop++;
		if (loop == LOOP_ALLOC_CHUNK) {
			ret = do_chunk_alloc(trans, root, data,
					     CHUNK_ALLOC_FORCE);
			/*
			 * Do not bail out on ENOSPC since we
			 * can do more things.
			 */
			if (ret < 0 && ret != -ENOSPC) {
				btrfs_abort_transaction(trans,
							root, ret);
				goto out;
			}
		}

		if (loop == LOOP_NO_EMPTY_SIZE) {
			empty_size = 0;
			empty_cluster = 0;
		}

		goto search;
	} else if (!ins->objectid) {
		ret = -ENOSPC;
	} else if (ins->objectid) {
		ret = 0;
	}
out:
	return ret;
}
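
/*
 * Escalation sketch (an added summary, not from the original source): a
 * search that keeps missing walks LOOP_CACHING_NOWAIT ->
 * LOOP_CACHING_WAIT (wait on caching block groups) -> LOOP_ALLOC_CHUNK
 * (force a new chunk via do_chunk_alloc()) -> LOOP_NO_EMPTY_SIZE (retry
 * with empty_size and empty_cluster forced to 0), resetting index to 0 at
 * each step so every raid type is re-scanned, before finally returning
 * -ENOSPC.
 */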
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups)
{
	struct btrfs_block_group_cache *cache;
	int index = 0;

	spin_lock(&info->lock);
	printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
	       (unsigned long long)info->flags,
	       (unsigned long long)(info->total_bytes - info->bytes_used -
				    info->bytes_pinned - info->bytes_reserved -
				    info->bytes_readonly),
	       (info->full) ? "" : "not ");
	printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
	       (unsigned long long)info->total_bytes,
	       (unsigned long long)info->bytes_used,
	       (unsigned long long)info->bytes_pinned,
	       (unsigned long long)info->bytes_reserved,
	       (unsigned long long)info->bytes_may_use,
	       (unsigned long long)info->bytes_readonly);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
		       (unsigned long long)cache->key.objectid,
		       (unsigned long long)cache->key.offset,
		       (unsigned long long)btrfs_block_group_used(&cache->item),
		       (unsigned long long)cache->pinned,
		       (unsigned long long)cache->reserved,
		       cache->ro ? "[readonly]" : "");
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 struct btrfs_key *ins, u64 data)
{
	bool final_tried = false;
	int ret;

	data = btrfs_get_alloc_profile(root, data);
again:
	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(trans, root, num_bytes, empty_size,
			       hint_byte, ins, data);

	if (ret == -ENOSPC) {
		if (!final_tried) {
			num_bytes = num_bytes >> 1;
			num_bytes = num_bytes & ~(root->sectorsize - 1);
			num_bytes = max(num_bytes, min_alloc_size);
			if (num_bytes == min_alloc_size)
				final_tried = true;
			goto again;
		} else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
			struct btrfs_space_info *sinfo;

			sinfo = __find_space_info(root->fs_info, data);
			printk(KERN_ERR "btrfs allocation failed flags %llu, "
			       "wanted %llu\n", (unsigned long long)data,
			       (unsigned long long)num_bytes);
			if (sinfo)
				dump_space_info(sinfo, num_bytes, 1);
		}
	}

	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);

	return ret;
}
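
/*
 * Worked retry example (an added illustration with assumed sizes): a 1MiB
 * request with min_alloc_size = 256KiB that keeps hitting -ENOSPC retries
 * with 512KiB and then 256KiB; since that equals min_alloc_size,
 * final_tried is set and the next -ENOSPC is returned to the caller,
 * optionally dumping space info when the ENOSPC_DEBUG mount option is
 * enabled.
 */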
static int __btrfs_free_reserved_extent(struct btrfs_root *root,
					u64 start, u64 len, int pin)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
		printk(KERN_ERR "Unable to find block group for %llu\n",
		       (unsigned long long)start);
		return -ENOSPC;
	}

	if (btrfs_test_opt(root, DISCARD))
		ret = btrfs_discard_extent(root, start, len, NULL);

	if (pin)
		pin_down_extent(root, cache, start, len, 1);
	else {
		btrfs_add_free_space(cache, start, len);
		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
	}
	btrfs_put_block_group(cache);

	trace_btrfs_reserved_extent_free(root, start, len);

	return ret;
}
  5352. int btrfs_free_reserved_extent(struct btrfs_root *root,
  5353. u64 start, u64 len)
  5354. {
  5355. return __btrfs_free_reserved_extent(root, start, len, 0);
  5356. }
  5357. int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
  5358. u64 start, u64 len)
  5359. {
  5360. return __btrfs_free_reserved_extent(root, start, len, 1);
  5361. }
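
/*
 * Insert the extent item for a newly allocated data extent together
 * with a single inline backref.  A non-zero parent means the ref is
 * shared and keyed by the parent tree block; otherwise a full
 * extent_data_ref keyed by root/objectid/offset is embedded right
 * after the extent item.
 */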
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;

	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	ret = update_block_group(root, ins->objectid, ins->offset, 1);
	if (ret) { /* -ENOENT, logic error */
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
	return ret;
}
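
/*
 * Same idea as above but for tree blocks: the extent item carries a
 * btrfs_tree_block_info (key + level) and a single inline ref, either
 * a shared block ref (full backref) or a ref keyed by the owning root.
 */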
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
	block_info = (struct btrfs_tree_block_info *)(extent_item + 1);

	btrfs_set_tree_block_key(leaf, block_info, key);
	btrfs_set_tree_block_level(leaf, block_info, level);

	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	if (parent > 0) {
		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	ret = update_block_group(root, ins->objectid, ins->offset, 1);
	if (ret) { /* -ENOENT, logic error */
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
	return ret;
}
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 root_objectid, u64 owner,
				     u64 offset, struct btrfs_key *ins)
{
	int ret;

	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);

	ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
					 ins->offset, 0,
					 root_objectid, owner, offset,
					 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
	return ret;
}
/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;
	u64 start = ins->objectid;
	u64 num_bytes = ins->offset;

	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
	cache_block_group(block_group, 0);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
		BUG_ON(ret); /* -ENOMEM */
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = add_excluded_extent(root, start, num_bytes);
			BUG_ON(ret); /* -ENOMEM */
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			BUG_ON(ret); /* -ENOMEM */
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			BUG_ON(ret); /* -ENOMEM */

			start = caching_ctl->progress;
			num_bytes = ins->objectid + ins->offset -
				    caching_ctl->progress;
			ret = add_excluded_extent(root, start, num_bytes);
			BUG_ON(ret); /* -ENOMEM */
		}

		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}

	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
					  RESERVE_ALLOC_NO_ACCOUNT);
	BUG_ON(ret); /* logic error */
	btrfs_put_block_group(block_group);
	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
					 0, owner, offset, ins, 1);
	return ret;
}
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u64 bytenr, u32 blocksize,
					    int level)
{
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root, buf);
	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);

	btrfs_set_lock_blocking(buf);
	btrfs_set_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		/*
		 * we allow two log transactions at a time, use different
		 * EXTENT bit to differentiate dirty pages.
		 */
		if (root->log_transid % 2 == 0)
			set_extent_dirty(&root->dirty_log_pages, buf->start,
					 buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
				       buf->start + buf->len - 1, GFP_NOFS);
	} else {
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->blocks_used++;
	/* this returns a buffer locked for blocking */
	return buf;
}
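
/*
 * Pick the reserve to charge a new tree block against.  An unsized
 * reserve is filled on demand, falling back to the global reserve if
 * that fails; a sized reserve that has run dry (and isn't failfast)
 * gets one rate-limited warning plus another reservation attempt
 * before we give up with -ENOSPC.
 */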
static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
	      struct btrfs_root *root, u32 blocksize)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	int ret;

	block_rsv = get_block_rsv(trans, root);

	if (block_rsv->size == 0) {
		ret = reserve_metadata_bytes(root, block_rsv, blocksize,
					     BTRFS_RESERVE_NO_FLUSH);
		/*
		 * If we couldn't reserve metadata bytes try and use some from
		 * the global reserve.
		 */
		if (ret && block_rsv != global_rsv) {
			ret = block_rsv_use_bytes(global_rsv, blocksize);
			if (!ret)
				return global_rsv;
			return ERR_PTR(ret);
		} else if (ret) {
			return ERR_PTR(ret);
		}
		return block_rsv;
	}

	ret = block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;
	if (ret && !block_rsv->failfast) {
		static DEFINE_RATELIMIT_STATE(_rs,
					      DEFAULT_RATELIMIT_INTERVAL,
					      /*DEFAULT_RATELIMIT_BURST*/ 2);
		if (__ratelimit(&_rs))
			WARN(1, KERN_DEBUG "btrfs: block rsv returned %d\n",
			     ret);
		ret = reserve_metadata_bytes(root, block_rsv, blocksize,
					     BTRFS_RESERVE_NO_FLUSH);
		if (!ret) {
			return block_rsv;
		} else if (ret && block_rsv != global_rsv) {
			ret = block_rsv_use_bytes(global_rsv, blocksize);
			if (!ret)
				return global_rsv;
		}
	}

	return ERR_PTR(-ENOSPC);
}
static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
	block_rsv_add_bytes(block_rsv, blocksize, 0);
	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
}
/*
 * finds a free extent and does all the dirty work required for allocation.
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns the tree buffer or an ERR_PTR on failure.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u32 blocksize,
					u64 parent, u64 root_objectid,
					struct btrfs_disk_key *key, int level,
					u64 hint, u64 empty_size)
{
	struct btrfs_key ins;
	struct btrfs_block_rsv *block_rsv;
	struct extent_buffer *buf;
	u64 flags = 0;
	int ret;

	block_rsv = use_block_rsv(trans, root, blocksize);
	if (IS_ERR(block_rsv))
		return ERR_CAST(block_rsv);

	ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
				   empty_size, hint, &ins, 0);
	if (ret) {
		unuse_block_rsv(root->fs_info, block_rsv, blocksize);
		return ERR_PTR(ret);
	}

	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
				    blocksize, level);
	BUG_ON(IS_ERR(buf)); /* -ENOMEM */

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins.objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		struct btrfs_delayed_extent_op *extent_op;
		extent_op = btrfs_alloc_delayed_extent_op();
		BUG_ON(!extent_op); /* -ENOMEM */
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		extent_op->update_key = 1;
		extent_op->update_flags = 1;
		extent_op->is_data = 0;

		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
					ins.objectid,
					ins.offset, parent, root_objectid,
					level, BTRFS_ADD_DELAYED_EXTENT,
					extent_op, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	return buf;
}
struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
	int reada_slot;
	int reada_count;
	int for_reloc;
};
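
/*
 * Walk stages: in DROP_REFERENCE we drop our reference on each block
 * visited, descending only into blocks we hold the last reference to;
 * when a shared subtree needs its backrefs converted to full backrefs
 * first, do_walk_down switches to UPDATE_BACKREF for that subtree and
 * walk_up_proc switches back to DROP_REFERENCE afterwards.
 */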
#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2
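
/*
 * Readahead for the level below us.  reada_count adapts: it shrinks to
 * 2/3 (minimum 2) while we are still inside the previously read-ahead
 * window, and grows by 3/2 (capped at the node fanout) once we have
 * walked past it.
 */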
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u32 nritems;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(root));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);
	blocksize = btrfs_level_size(root, wc->level - 1);

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
					       &refs, &flags);
		/* We don't care about errors in readahead. */
		if (ret < 0)
			continue;
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		ret = readahead_tree_block(root, bytenr, blocksize,
					   generation);
		if (ret)
			break;
		nread++;
	}
	wc->reada_slot = slot;
}
/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when reference count of tree block is 1, it won't increase
	 * again. once full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, root,
					       eb->start, eb->len,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(ret == -ENOMEM);
		if (ret)
			return ret;
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock_rw(eb, path->locks[level]);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
						  eb->len, flag, 0);
		BUG_ON(ret); /* -ENOMEM */
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock_rw(eb, path->locks[level]);
		path->locks[level] = 0;
	}
	return 0;
}
/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to. if the block
 * is shared and we need update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update back refs, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	u64 bytenr;
	u64 generation;
	u64 parent;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;

	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * if the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}

	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
	blocksize = btrfs_level_size(root, level - 1);

	next = btrfs_find_tree_block(root, bytenr, blocksize);
	if (!next) {
		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
		if (!next)
			return -ENOMEM;
		reada = 1;
	}
	btrfs_tree_lock(next);
	btrfs_set_lock_blocking(next);

	ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1]);
	if (ret < 0) {
		btrfs_tree_unlock(next);
		return ret;
	}

	BUG_ON(wc->refs[level - 1] == 0);
	*lookup_info = 0;

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;

			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;

			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;

			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}

	if (!btrfs_buffer_uptodate(next, generation, 0)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}

	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(root, bytenr, blocksize, generation);
		if (!next)
			return -EIO;
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);
	}

	level--;
	BUG_ON(level != btrfs_header_level(next));
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level]));
			parent = 0;
		}

		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
				root->root_key.objectid, level - 1, 0, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	btrfs_tree_unlock(next);
	free_extent_buffer(next);
	*lookup_info = 1;
	return 1;
}
/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, root,
						       eb->start, eb->len,
						       &wc->refs[level],
						       &wc->flags[level]);
			if (ret < 0) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return ret;
			}
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1,
						    wc->for_reloc);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0,
						    wc->for_reloc);
			BUG_ON(ret); /* -ENOMEM */
		}
		/* make block locked assertion in clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		}
		clean_tree_block(trans, root, eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(eb));
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level + 1]));
	}

	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;
}
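
/*
 * The two drivers below alternate: walk_down_tree descends until
 * walk_down_proc/do_walk_down say stop (or we hit a leaf), then
 * walk_up_tree processes and frees blocks on the way back up until it
 * finds a sibling slot to descend into again.
 */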
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret > 0)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			return ret;
		level = wc->level;
	}
	return 0;
}
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;

			if (path->locks[level]) {
				btrfs_tree_unlock_rw(path->nodes[level],
						     path->locks[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}
/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are only
 * referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 */
int btrfs_drop_snapshot(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, int update_ref,
			int for_reloc)
{
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		err = -ENOMEM;
		goto out;
	}

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	if (block_rsv)
		trans->block_rsv = block_rsv;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(path->nodes[level]);
		path->slots[level] = 0;
		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out_end_trans;
		}
		WARN_ON(ret > 0);

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			btrfs_set_lock_blocking(path->nodes[level]);

			ret = btrfs_lookup_extent_info(trans, root,
						path->nodes[level]->start,
						path->nodes[level]->len,
						&wc->refs[level],
						&wc->flags[level]);
			if (ret < 0) {
				err = ret;
				goto out_end_trans;
			}
			BUG_ON(wc->refs[level] == 0);

			if (level == root_item->drop_level)
				break;

			btrfs_tree_unlock(path->nodes[level]);
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->for_reloc = for_reloc;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			level = wc->level;
			btrfs_node_key(path->nodes[level],
				       &root_item->drop_progress,
				       path->slots[level]);
			root_item->drop_level = level;
		}

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans, tree_root)) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			if (ret) {
				btrfs_abort_transaction(trans, tree_root, ret);
				err = ret;
				goto out_end_trans;
			}

			btrfs_end_transaction_throttle(trans, tree_root);
			trans = btrfs_start_transaction(tree_root, 0);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				goto out_free;
			}
			if (block_rsv)
				trans->block_rsv = block_rsv;
		}
	}
	btrfs_release_path(path);
	if (err)
		goto out_end_trans;

	ret = btrfs_del_root(trans, tree_root, &root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, tree_root, ret);
		goto out_end_trans;
	}

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
					   NULL, NULL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, tree_root, ret);
			err = ret;
			goto out_end_trans;
		} else if (ret > 0) {
			/* if we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	if (root->in_radix) {
		btrfs_free_fs_root(tree_root->fs_info, root);
	} else {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		kfree(root);
	}
out_end_trans:
	btrfs_end_transaction_throttle(trans, tree_root);
out_free:
	kfree(wc);
	btrfs_free_path(path);
out:
	if (err)
		btrfs_std_error(root->fs_info, err);
	return err;
}
/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'.
 * only used by relocation code.
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct extent_buffer *node,
		       struct extent_buffer *parent)
{
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_locked(parent);
	parent_level = btrfs_header_level(parent);
	extent_buffer_get(parent);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->for_reloc = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped;

	/*
	 * if restripe for this chunk_type is on, pick target profile and
	 * return, otherwise do the usual balance
	 */
	stripped = get_restripe_target(root->fs_info, flags);
	if (stripped)
		return extended_to_chunk(stripped);

	/*
	 * we add in the count of missing devices because we want
	 * to make sure that any RAID levels on a degraded FS
	 * continue to be honored.
	 */
	num_devices = root->fs_info->fs_devices->rw_devices +
		root->fs_info->fs_devices->missing_devices;

	stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* this is drive concat, leave it alone */
	}

	return flags;
}
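
/*
 * A block group can only be made read-only if the rest of its
 * space_info can absorb the unused bytes it gives up, i.e. if
 * used + reserved + pinned + may_use + readonly + num_bytes (plus a
 * small cushion for metadata/system groups, unless forced) still fits
 * within total_bytes.
 */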
static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	u64 min_allocable_bytes;
	int ret = -ENOSPC;

	/*
	 * We need some metadata space and system metadata space for
	 * allocating chunks in some corner cases, so keep a minimum
	 * headroom unless we are forced to set the group read-only.
	 */
	if ((sinfo->flags &
	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
	    !force)
		min_allocable_bytes = 1 * 1024 * 1024;
	else
		min_allocable_bytes = 0;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->ro) {
		ret = 0;
		goto out;
	}

	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);

	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
	    min_allocable_bytes <= sinfo->total_bytes) {
		sinfo->bytes_readonly += num_bytes;
		cache->ro = 1;
		ret = 0;
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return ret;
}
int btrfs_set_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

	BUG_ON(cache->ro);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	alloc_flags = update_block_group_flags(root, cache->flags);
	if (alloc_flags != cache->flags) {
		ret = do_chunk_alloc(trans, root, alloc_flags,
				     CHUNK_ALLOC_FORCE);
		if (ret < 0)
			goto out;
	}

	ret = set_block_group_ro(cache, 0);
	if (!ret)
		goto out;
	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
	ret = do_chunk_alloc(trans, root, alloc_flags,
			     CHUNK_ALLOC_FORCE);
	if (ret < 0)
		goto out;
	ret = set_block_group_ro(cache, 0);
out:
	btrfs_end_transaction(trans, root);
	return ret;
}

int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 type)
{
	u64 alloc_flags = get_alloc_profile(root, type);
	return do_chunk_alloc(trans, root, alloc_flags,
			      CHUNK_ALLOC_FORCE);
}
/*
 * helper to account the unused space of all the readonly block groups in the
 * list. takes mirrors into account.
 */
static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
{
	struct btrfs_block_group_cache *block_group;
	u64 free_bytes = 0;
	int factor;

	list_for_each_entry(block_group, groups_list, list) {
		spin_lock(&block_group->lock);

		if (!block_group->ro) {
			spin_unlock(&block_group->lock);
			continue;
		}

		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
					  BTRFS_BLOCK_GROUP_RAID10 |
					  BTRFS_BLOCK_GROUP_DUP))
			factor = 2;
		else
			factor = 1;

		free_bytes += (block_group->key.offset -
			       btrfs_block_group_used(&block_group->item)) *
			       factor;

		spin_unlock(&block_group->lock);
	}

	return free_bytes;
}

/*
 * helper to account the unused space of all the readonly block groups in the
 * space_info. takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	int i;
	u64 free_bytes = 0;

	spin_lock(&sinfo->lock);

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		if (!list_empty(&sinfo->block_groups[i]))
			free_bytes += __btrfs_get_ro_block_group_free_space(
						&sinfo->block_groups[i]);

	spin_unlock(&sinfo->lock);

	return free_bytes;
}
void btrfs_set_block_group_rw(struct btrfs_root *root,
			      struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);
	sinfo->bytes_readonly -= num_bytes;
	cache->ro = 0;
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
}
/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
 * ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	u64 min_free;
	u64 dev_min = 1;
	u64 dev_nr = 0;
	u64 target;
	int index;
	int full = 0;
	int ret = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);

	/* odd, couldn't find the block group, leave it alone */
	if (!block_group)
		return -1;

	min_free = btrfs_block_group_used(&block_group->item);

	/* no bytes used, we're good */
	if (!min_free)
		goto out;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);

	full = space_info->full;

	/*
	 * if this is the last block group we have in this space, we can't
	 * relocate it unless we're able to allocate a new chunk below.
	 *
	 * Otherwise, we need to make sure we have room in the space to handle
	 * all of the extents from this block group.  If we can, we're good
	 */
	if ((space_info->total_bytes != block_group->key.offset) &&
	    (space_info->bytes_used + space_info->bytes_reserved +
	     space_info->bytes_pinned + space_info->bytes_readonly +
	     min_free < space_info->total_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * ok we don't have enough space, but maybe we have free space on our
	 * devices to allocate new chunks for relocation, so loop through our
	 * alloc devices and guess if we have enough space.  if this block
	 * group is going to be restriped, run checks against the target
	 * profile instead of the current one.
	 */
	ret = -1;

	/*
	 * index:
	 *      0: raid10
	 *      1: raid1
	 *      2: dup
	 *      3: raid0
	 *      4: single
	 */
	target = get_restripe_target(root->fs_info, block_group->flags);
	if (target) {
		index = __get_raid_index(extended_to_chunk(target));
	} else {
		/*
		 * this is just a balance, so if we were marked as full
		 * we know there is no space for a new chunk
		 */
		if (full)
			goto out;

		index = get_block_group_index(block_group);
	}

	if (index == BTRFS_RAID_RAID10) {
		dev_min = 4;
		/* Divide by 2 */
		min_free >>= 1;
	} else if (index == BTRFS_RAID_RAID1) {
		dev_min = 2;
	} else if (index == BTRFS_RAID_DUP) {
		/* Multiply by 2 */
		min_free <<= 1;
	} else if (index == BTRFS_RAID_RAID0) {
		dev_min = fs_devices->rw_devices;
		do_div(min_free, dev_min);
	}

	mutex_lock(&root->fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 dev_offset;

		/*
		 * check to make sure we can actually find a chunk with enough
		 * space to fit our block group in.
		 */
		if (device->total_bytes > device->bytes_used + min_free &&
		    !device->is_tgtdev_for_dev_replace) {
			ret = find_free_dev_extent(device, min_free,
						   &dev_offset, NULL);
			if (!ret)
				dev_nr++;

			if (dev_nr >= dev_min)
				break;

			ret = -1;
		}
	}
	mutex_unlock(&root->fs_info->chunk_mutex);
out:
	btrfs_put_block_group(block_group);
	return ret;
}
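
/*
 * Scan the extent root from *key onwards and position the path at the
 * first BLOCK_GROUP_ITEM with objectid >= key->objectid.
 */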
static int find_first_block_group(struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}
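
/*
 * Drop the inode references that block groups hold on their free space
 * cache inodes so the inodes can actually be released.
 */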
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = next_block_group(info->tree_root,
						       block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		iput(inode);
		last = block_group->key.objectid + block_group->key.offset;
		btrfs_put_block_group(block_group);
	}
}
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->extent_commit_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		put_caching_control(caching_ctl);
	}
	up_write(&info->extent_commit_sem);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		if (block_group->cached == BTRFS_CACHE_STARTED)
			wait_block_group_cache_done(block_group);

		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO)
			free_excluded_extents(info->extent_root, block_group);

		btrfs_remove_free_space_cache(block_group);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/* now that all the block groups are freed, go through and
	 * free all the space_info structs.  This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them.  We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */
	synchronize_rcu();

	release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);
		if (space_info->bytes_pinned > 0 ||
		    space_info->bytes_reserved > 0 ||
		    space_info->bytes_may_use > 0) {
			WARN_ON(1);
			dump_space_info(space_info, 0, 0);
		}
		list_del(&space_info->list);
		kfree(space_info);
	}
	return 0;
}
static void __link_block_group(struct btrfs_space_info *space_info,
			       struct btrfs_block_group_cache *cache)
{
	int index = get_block_group_index(cache);

	down_write(&space_info->groups_sem);
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);
}
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int need_clear = 0;
	u64 cache_gen;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
	if (btrfs_test_opt(root, SPACE_CACHE) &&
	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(root, CLEAR_CACHE))
		need_clear = 1;

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}
		cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
						GFP_NOFS);
		if (!cache->free_space_ctl) {
			kfree(cache);
			ret = -ENOMEM;
			goto error;
		}

		atomic_set(&cache->count, 1);
		spin_lock_init(&cache->lock);
		cache->fs_info = info;
		INIT_LIST_HEAD(&cache->list);
		INIT_LIST_HEAD(&cache->cluster_list);

		if (need_clear) {
			/*
			 * When we mount with old space cache, we need to
			 * set BTRFS_DC_CLEAR and set dirty flag.
			 *
			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
			 *    truncate the old free space cache inode and
			 *    setup a new one.
			 * b) Setting 'dirty flag' makes sure that we flush
			 *    the new space cache info onto disk.
			 */
			cache->disk_cache_state = BTRFS_DC_CLEAR;
			if (btrfs_test_opt(root, SPACE_CACHE))
				cache->dirty = 1;
		}

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(path);
		cache->flags = btrfs_block_group_flags(&cache->item);
		cache->sectorsize = root->sectorsize;

		btrfs_init_free_space_ctl(cache);

		/*
		 * We need to exclude the super stripes now so that the space
		 * info has super bytes accounted for, otherwise we'll think
		 * we have more space than we actually do.
		 */
		exclude_super_stripes(root, cache);

		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us a lot of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			free_excluded_extents(root, cache);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
			free_excluded_extents(root, cache);
		}

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		BUG_ON(ret); /* -ENOMEM */
		cache->space_info = space_info;
		spin_lock(&cache->space_info->lock);
		cache->space_info->bytes_readonly += cache->bytes_super;
		spin_unlock(&cache->space_info->lock);

		__link_block_group(space_info, cache);

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		BUG_ON(ret); /* Logic error */

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_ro(cache, 1);
	}

	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
		if (!(get_alloc_profile(root, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * avoid allocating from un-mirrored block group if there are
		 * mirrored block groups.  (indexes 3 and 4 are the raid0 and
		 * single lists, see the index map in btrfs_can_relocate)
		 */
		list_for_each_entry(cache, &space_info->block_groups[3], list)
			set_block_group_ro(cache, 1);
		list_for_each_entry(cache, &space_info->block_groups[4], list)
			set_block_group_ro(cache, 1);
	}

	init_global_block_rsv(info);
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
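
/*
 * Insert the BLOCK_GROUP_ITEMs for the block groups queued on
 * trans->new_bgs by btrfs_make_block_group().  On failure the
 * transaction is aborted, but the list is still fully drained.
 */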
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	struct btrfs_block_group_cache *block_group, *tmp;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_block_group_item item;
	struct btrfs_key key;
	int ret = 0;

	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
				 new_bg_list) {
		list_del_init(&block_group->new_bg_list);

		if (ret)
			continue;

		spin_lock(&block_group->lock);
		memcpy(&item, &block_group->item, sizeof(item));
		memcpy(&key, &block_group->key, sizeof(key));
		spin_unlock(&block_group->lock);

		ret = btrfs_insert_item(trans, extent_root, &key, &item,
					sizeof(item));
		if (ret)
			btrfs_abort_transaction(trans, extent_root, ret);
	}
}
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_log_full_commit = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return -ENOMEM;
	}

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->sectorsize = root->sectorsize;
	cache->fs_info = root->fs_info;

	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->new_bg_list);

	btrfs_init_free_space_ctl(cache);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);
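
	/*
	 * The chunk was just allocated, so there is nothing to scan: mark
	 * the group fully cached and add its free space immediately.
	 */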
	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	exclude_super_stripes(root, cache);
	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	free_excluded_extents(root, cache);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret); /* -ENOMEM */
	update_global_block_rsv(root->fs_info);

	spin_lock(&cache->space_info->lock);
	cache->space_info->bytes_readonly += cache->bytes_super;
	spin_unlock(&cache->space_info->lock);

	__link_block_group(cache->space_info, cache);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	BUG_ON(ret); /* Logic error */

	list_add_tail(&cache->new_bg_list, &trans->new_bgs);

	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}
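
/*
 * Clear the extended profile bits for this chunk type from the per-type
 * avail_*_alloc_bits masks; the counterpart to set_avail_alloc_bits().
 * Called when the last block group of a given kind goes away.
 */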
static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
}
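
/*
 * Tear down a block group: drop its free space cache inode, delete its
 * free-space item and block group item, unlink it from the space_info and
 * the block group cache tree, and adjust the space accounting.  The caller
 * must have already made the group read-only (see the BUG_ON below).
 */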
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	int ret;
	int index;
	int factor;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	free_excluded_extents(root, block_group);

	memcpy(&key, &block_group->key, sizeof(key));
	index = get_block_group_index(block_group);
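
	/* Mirrored profiles keep two copies, doubling the on-disk usage. */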
	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
				  BTRFS_BLOCK_GROUP_RAID1 |
				  BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	inode = lookup_free_space_inode(tree_root, block_group, path);
	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, inode);
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);

	if (root->fs_info->first_logical_byte == block_group->key.objectid)
		root->fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index]))
		clear_avail_alloc_bits(root->fs_info, block_group->flags);
	up_write(&block_group->space_info->groups_sem);

	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	block_group->space_info->disk_total -= block_group->key.offset * factor;
	spin_unlock(&block_group->space_info->lock);

	memcpy(&key, &block_group->key, sizeof(key));

	btrfs_clear_space_info_full(root->fs_info);
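
	/*
	 * Drop two references: one for the block group cache rbtree entry
	 * removed above, and one for the lookup at the top of this function.
	 */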
	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}
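
/*
 * Set up the base space_info entries for each block group type: SYSTEM,
 * plus either separate METADATA and DATA entries or a single combined
 * entry on filesystems with mixed block groups.
 */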
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return 1;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	}
out:
	return ret;
}
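
/* Thin wrappers exported for the error-handling/cleanup paths. */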
int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	return unpin_extent_range(root, start, end);
}

int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
			       u64 num_bytes, u64 *actual_bytes)
{
	return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
}
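
/*
 * FITRIM entry point: walk every block group overlapping @range, make sure
 * each group's free space is cached, and discard its free extents.  The
 * number of bytes actually trimmed is returned in range->len.
 */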
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	u64 group_trimmed;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret = 0;

	/*
	 * Try to trim all of the FS space; the first block group may start
	 * at a non-zero offset.
	 */
	if (range->len == total_bytes)
		cache = btrfs_lookup_first_block_group(fs_info, range->start);
	else
		cache = btrfs_lookup_block_group(fs_info, range->start);

	while (cache) {
		if (cache->key.objectid >= (range->start + range->len)) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->key.objectid);
		end = min(range->start + range->len,
			  cache->key.objectid + cache->key.offset);

		if (end - start >= range->minlen) {
			if (!block_group_cache_done(cache)) {
				ret = cache_block_group(cache, 0);
				if (!ret)
					wait_block_group_cache_done(cache);
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				btrfs_put_block_group(cache);
				break;
			}
		}

		cache = next_block_group(fs_info->tree_root, cache);
	}

	range->len = trimmed;
	return ret;
}