extent-tree.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try to allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};
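
/*
 * A hedged sketch of intended use, not a call sequence taken from this
 * file: ordinary allocations pass CHUNK_ALLOC_NO_FORCE, the ENOSPC
 * flushing paths may pass CHUNK_ALLOC_LIMITED, and callers that must
 * have a fresh chunk (relocation, for instance) pass CHUNK_ALLOC_FORCE:
 *
 *	do_chunk_alloc(trans, extent_root, flags, CHUNK_ALLOC_FORCE);
 */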

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};
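
/*
 * Hedged example of how these values pair up, assuming the usual
 * allocate/free flow around btrfs_update_reserved_bytes():
 *
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
 *	... use the space, or on failure ...
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 *
 * RESERVE_ALLOC_NO_ACCOUNT is for callers that have already charged
 * bytes_may_use themselves.
 */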

static int update_block_group(struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags,
			  int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}
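
/*
 * Return true if the block group's flags contain every requested
 * allocation bit (block group type and RAID profile).
 */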
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}
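
/*
 * Block groups are reference counted: the final btrfs_put_block_group()
 * frees the cache structure and its free_space_ctl, warning if pinned
 * or reserved bytes are still outstanding.
 */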
static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->key.objectid)
		info->first_logical_byte = block_group->key.objectid;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr.
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
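
/*
 * Excluded extents are byte ranges the allocator must never hand out,
 * such as the superblock mirror stripes.  They are tagged with
 * EXTENT_UPTODATE in both freed_extents trees so the exclusion holds
 * whichever tree is currently acting as pinned_extents.
 */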
static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;

	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}
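
/*
 * Carve every superblock mirror copy that maps into this block group
 * out of its usable space, accounting the excluded bytes in
 * cache->bytes_super.
 */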
static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		while (nr--) {
			u64 start, len;

			if (logical[nr] > cache->key.objectid +
			    cache->key.offset)
				continue;

			if (logical[nr] + stripe_len <= cache->key.objectid)
				continue;

			start = logical[nr];
			if (start < cache->key.objectid) {
				start = cache->key.objectid;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->key.objectid +
					    cache->key.offset - start);
			}

			cache->bytes_super += len;
			ret = add_excluded_extent(root, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * This is only called by cache_block_group; since we could have freed
 * extents, we need to check the pinned_extents for any extents that
 * can't be used yet, because their free space will be released as soon
 * as the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
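
/*
 * Worked example for add_new_free_space(): caching [0, 100) while the
 * inclusive range [30, 49] is pinned adds [0, 30) and [50, 100) as free
 * space and returns 80.
 *
 * caching_thread() below walks the commit root of the extent tree and
 * feeds every gap between extent items through add_new_free_space().
 */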
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = -ENOMEM;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched()) {
				caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->extent_commit_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto err;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->tree_root->leafsize;
			else
				last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	if (ret) {
		spin_lock(&block_group->lock);
		block_group->caching_ctl = NULL;
		block_group->cached = BTRFS_CACHE_ERROR;
		spin_unlock(&block_group->lock);
	}
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}
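
/*
 * Begin caching a block group's free space.  With load_cache_only set,
 * only the on-disk free space cache is consulted; otherwise a caching
 * worker is queued to scan the extent tree in the background.
 */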
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	caching_ctl->work.func = caching_thread;

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info.  The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
			}
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching, set cached to the
		 * appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->extent_commit_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	if (ret > 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == start &&
		    key.type == BTRFS_METADATA_ITEM_KEY)
			ret = 0;
	}
	btrfs_free_path(path);
	return ret;
}
/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * what the reference count and extent flags will be once all of the
 * queued delayed refs have been processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
		offset = root->leafsize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (metadata) {
		key.objectid = bytenr;
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = offset;
	} else {
		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = offset;
	}

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		metadata = 0;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == root->leafsize)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = root->leafsize;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually full back refs are generic, and can
 * be used in all the cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead: every time a tree block
 * gets COWed, we have to update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, the lowest key and the level of the
 * tree block are also required.  This information is stored in the
 * tree block info structure.
 */
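/*
 * Editor's illustration (not from the original source): concrete key
 * shapes under the rules above, assuming a data extent at bytenr D
 * referenced by subvolume 5 / inode 257 / file offset 0, and a tree
 * block at bytenr B whose parent node starts at bytenr P:
 *
 *	implicit data ref: (D, BTRFS_EXTENT_DATA_REF_KEY,
 *			    hash_extent_data_ref(5, 257, 0))
 *	full data ref:	   (D, BTRFS_SHARED_DATA_REF_KEY, leaf bytenr)
 *	implicit tree ref: (B, BTRFS_TREE_BLOCK_REF_KEY, 5)
 *	full tree ref:	   (B, BTRFS_SHARED_BLOCK_REF_KEY, P)
 */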
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif
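/*
 * The implicit data ref key offset is a 64-bit hash of
 * (root objectid, inode objectid, file offset): the root goes through
 * one crc32c chain, the other two fields through a second chain, and
 * the two CRCs are combined below.  Collisions are possible and are
 * handled by the callers (see insert_extent_data_ref()).
 */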
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}
static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}
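/*
 * Look up the keyed (non-inline) data ref item for an extent.  With a
 * parent this is a direct search for the shared ref; without one we
 * search by the ref hash and then walk forward over hash collisions
 * until the (root, owner, offset) triple matches.  Returns 0 with the
 * path at the item on success, -ENOENT if no matching ref exists.
 */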
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
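/*
 * Insert (or bump the count of) a keyed data ref item.  For shared refs
 * the key is unique, so -EEXIST simply means the count gets incremented.
 * For keyed refs the offset is a hash, so on -EEXIST we may be looking
 * at a different (root, owner, offset) triple that collided; in that
 * case the key offset is bumped and the insert retried until either an
 * empty slot or the matching ref is found.
 */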
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}
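/*
 * Drop refs_to_drop references from the keyed data ref item the path
 * currently points at.  The item itself is deleted once its ref count
 * reaches zero; otherwise just the stored count is lowered.
 */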
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}
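/*
 * Pick the back ref key type for an extent.  An owner below
 * BTRFS_FIRST_FREE_OBJECTID means the extent is a tree block; file data
 * always carries an inode number at or above it.  A non-zero parent
 * means the block is shared, which selects the full back ref flavour:
 *
 *	owner		parent == 0		parent > 0
 *	tree block	TREE_BLOCK_REF_KEY	SHARED_BLOCK_REF_KEY
 *	data extent	EXTENT_DATA_REF_KEY	SHARED_DATA_REF_KEY
 */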
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
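/*
 * Report the key that follows the current path position, walking up
 * the tree until a node with a next slot is found.  Returns 0 and fills
 * *key on success, 1 if the path already points at the very last key.
 */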
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our parent level, so we can just add one to get the level
	 * for the block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(root, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}
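/*
 * Find a back ref for the extent in either form: try the inline refs
 * embedded in the extent item first, then fall back to the keyed
 * (standalone) tree block or data ref items.
 */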
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}
/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(root, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(root, path, iref,
					     refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(root, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}
static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(root, path, iref,
					     -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}
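/*
 * blkdev_issue_discard() wants 512-byte sectors, while btrfs tracks
 * everything in bytes; the >> 9 below does the byte-to-sector
 * conversion for both the start and the length.
 */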
static int btrfs_issue_discard(struct block_device *bdev,
			       u64 start, u64 len)
{
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
			      bytenr, &num_bytes, &bbio, 0);
	/* Error condition is -ENOMEM */
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			if (!stripe->dev->can_discard)
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length);
			if (!ret)
				discarded_bytes += stripe->length;
			else if (ret != -EOPNOTSUPP)
				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		kfree(bbio);
	}

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	if (ret == -EOPNOTSUPP)
		ret = 0;

	return ret;
}
/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset, int for_cow)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
	}
	return ret;
}
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret != -EAGAIN)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
out:
	btrfs_free_path(path);
	return ret;
}
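/*
 * Apply one delayed data ref to the extent tree.  ADD with
 * insert_reserved set means the extent was just allocated and its
 * extent item still has to be created; a plain ADD bumps the refs of
 * an existing extent; DROP releases one reference and possibly frees
 * the extent.
 */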
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(node, ref, node->action);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}
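/*
 * Apply a queued extent op (flag and/or key update) to the on-disk
 * extent item.  With skinny metadata the item is first looked up by
 * (bytenr, METADATA_ITEM, level); if that misses, fall back to the fat
 * (bytenr, EXTENT_ITEM, num_bytes) key before giving up with -EIO.
 */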
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = !extent_op->is_data;

	if (trans->aborted)
		return 0;

	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = node->num_bytes;
	}

again:
	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			if (path->slots[0] > 0) {
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == node->bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == node->num_bytes)
					ret = 0;
			}
			if (ret > 0) {
				btrfs_release_path(path);
				metadata = 0;

				key.objectid = node->bytenr;
				key.offset = node->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				goto again;
			}
		} else {
			err = -EIO;
			goto out;
		}
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	ref = btrfs_delayed_node_to_tree_ref(node);
	trace_run_delayed_tree_ref(node, ref, node->action);

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	ins.objectid = node->bytenr;
	if (skinny_metadata) {
		ins.offset = ref->level;
		ins.type = BTRFS_METADATA_ITEM_KEY;
	} else {
		ins.offset = node->num_bytes;
		ins.type = BTRFS_EXTENT_ITEM_KEY;
	}

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret = 0;

	if (trans->aborted) {
		if (insert_reserved)
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
		return 0;
	}

	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree. But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		trace_run_delayed_ref_head(node, head, node->action);

		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
			}
		}
		return ret;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}
static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents the ref count from going down to zero when
	 * there still are pending delayed refs.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}
/*
 * Returns the number of refs processed on success.
 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
 */
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head.  If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * We need to try and merge add/drops of the same ref since we
		 * can run into issues with relocate dropping the implicit ref
		 * and then it being added back again before the drop can
		 * finish.  If we merged anything we need to re-loop so we can
		 * get a good ref.
		 */
		btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
					 locked_ref);

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);

		if (ref && ref->seq &&
		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
			/*
			 * there are still refs with lower seq numbers in the
			 * process of being added. Don't run this ref yet.
			 */
			list_del_init(&locked_ref->cluster);
			btrfs_delayed_ref_unlock(locked_ref);
			locked_ref = NULL;
			delayed_refs->num_heads_ready++;
			spin_unlock(&delayed_refs->lock);
			cond_resched();
			spin_lock(&delayed_refs->lock);
			continue;
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		if (!ref) {
			/* All delayed refs have been processed, Go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				btrfs_free_delayed_extent_op(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				btrfs_free_delayed_extent_op(extent_op);

				if (ret) {
					/*
					 * Need to reset must_insert_reserved if
					 * there was an error so the abort stuff
					 * can cleanup the reserved space
					 * properly.
					 */
					if (must_insert_reserved)
						locked_ref->must_insert_reserved = 1;
					btrfs_debug(fs_info,
						    "run_delayed_extent_op returned %d",
						    ret);
					spin_lock(&delayed_refs->lock);
					btrfs_delayed_ref_unlock(locked_ref);
					return ret;
				}

				goto next;
			}
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;
		if (!btrfs_delayed_ref_is_head(ref)) {
			/*
			 * when we play the delayed ref, also correct the
			 * ref_mod on head
			 */
			switch (ref->action) {
			case BTRFS_ADD_DELAYED_REF:
			case BTRFS_ADD_DELAYED_EXTENT:
				locked_ref->node.ref_mod -= ref->ref_mod;
				break;
			case BTRFS_DROP_DELAYED_REF:
				locked_ref->node.ref_mod += ref->ref_mod;
				break;
			default:
				WARN_ON(1);
			}
		} else {
			list_del_init(&locked_ref->cluster);
		}
		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);

		btrfs_free_delayed_extent_op(extent_op);
		if (ret) {
			btrfs_delayed_ref_unlock(locked_ref);
			btrfs_put_delayed_ref(ref);
			btrfs_debug(fs_info,
				    "run_one_delayed_ref returned %d", ret);
			spin_lock(&delayed_refs->lock);
			return ret;
		}

		/*
		 * If this node is a head, that means all the refs in this head
		 * have been dealt with, and we will pick the next head to deal
		 * with, so we must unlock the head and drop it from the cluster
		 * list before we release it.
		 */
		if (btrfs_delayed_ref_is_head(ref)) {
			btrfs_delayed_ref_unlock(locked_ref);
			locked_ref = NULL;
		}
		btrfs_put_delayed_ref(ref);
		count++;
next:
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	return count;
}
#ifdef SCRAMBLE_DELAYED_REFS
/*
 * Normally delayed refs get processed in ascending bytenr order. This
 * correlates in most cases to the order added. To expose dependencies on this
 * order, we start to process the tree in the middle instead of the beginning.
 */
static u64 find_middle(struct rb_root *root)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int alt = 1;
	u64 middle;
	u64 first = 0, last = 0;

	n = rb_first(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		first = entry->bytenr;
	}
	n = rb_last(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		last = entry->bytenr;
	}
	n = root->rb_node;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);

		middle = entry->bytenr;

		if (alt)
			n = n->rb_left;
		else
			n = n->rb_right;

		alt = 1 - alt;
	}
	return middle;
}
#endif

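/*
 * Example (illustrative, assuming a balanced tree): for bytenrs 1..7
 * rooted at 4, the alternating left/right descent above visits 4, then 2,
 * then 3, so find_middle() settles on 3, a node near the median, rather
 * than the leftmost entry that rb_first() would give.
 */
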
int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct qgroup_update *qgroup_update;
	int ret = 0;

	if (list_empty(&trans->qgroup_ref_list) !=
	    !trans->delayed_ref_elem.seq) {
		/* list without seq or seq without list */
		btrfs_err(fs_info,
			"qgroup accounting update error, list is%s empty, seq is %#x.%x",
			list_empty(&trans->qgroup_ref_list) ? "" : " not",
			(u32)(trans->delayed_ref_elem.seq >> 32),
			(u32)trans->delayed_ref_elem.seq);
		BUG();
	}

	if (!trans->delayed_ref_elem.seq)
		return 0;

	while (!list_empty(&trans->qgroup_ref_list)) {
		qgroup_update = list_first_entry(&trans->qgroup_ref_list,
						 struct qgroup_update, list);
		list_del(&qgroup_update->list);
		if (!ret)
			ret = btrfs_qgroup_account_ref(
					trans, fs_info, qgroup_update->node,
					qgroup_update->extent_op);
		kfree(qgroup_update);
	}

	btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);

	return ret;
}

static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
		      int count)
{
	int val = atomic_read(&delayed_refs->ref_seq);

	if (val < seq || val >= seq + count)
		return 1;
	return 0;
}

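/*
 * Example: a caller that sampled seq and later calls
 * refs_newer(delayed_refs, seq, 256) gets 1 once ref_seq has left the
 * window [seq, seq + 256), i.e. at least 256 refs have been run (or the
 * counter moved below seq) since the sample was taken.
 */
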
static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
{
	u64 num_bytes;

	num_bytes = heads * (sizeof(struct btrfs_extent_item) +
			     sizeof(struct btrfs_extent_inline_ref));
	if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
		num_bytes += heads * sizeof(struct btrfs_tree_block_info);

	/*
	 * We don't ever fill up leaves all the way so multiply by 2 just to be
	 * closer to what we're really going to want to use.
	 */
	return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
}

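/*
 * Rough worked example (the struct sizes here are assumptions for
 * illustration, roughly 24 bytes of extent item, 9 of inline ref and 18 of
 * tree block info): a non-skinny-metadata head costs about 51 bytes, so on
 * a fs with ~16K of usable leaf data area, 100 heads divide out to zero
 * full leaves and ~400 heads to one; the caller later doubles the
 * resulting byte estimate to allow for half-full leaves.
 */
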
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	struct btrfs_block_rsv *global_rsv;
	u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
	u64 num_bytes;
	int ret = 0;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	num_heads = heads_to_leaves(root, num_heads);
	if (num_heads > 1)
		num_bytes += (num_heads - 1) * root->leafsize;
	num_bytes <<= 1;
	global_rsv = &root->fs_info->global_block_rsv;

	/*
	 * If we can't allocate any more chunks lets make sure we have _lots_ of
	 * wiggle room since running delayed refs can create more delayed refs.
	 */
	if (global_rsv->space_info->full)
		num_bytes <<= 1;

	spin_lock(&global_rsv->lock);
	if (global_rsv->reserved <= num_bytes)
		ret = 1;
	spin_unlock(&global_rsv->lock);
	return ret;
}

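/*
 * Illustrative arithmetic (assumed numbers, not authoritative): with ~400
 * ready heads on a 16K-leaf fs, heads_to_leaves() above yields one leaf,
 * so num_bytes is a single tree-operation charge, doubled for slack and
 * doubled again when no new chunks can be allocated; we ask callers to
 * throttle once the global reserve no longer covers that estimate.
 */
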
/*
 * This starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 *
 * Returns 0 on success or if called with an aborted transaction
 * Returns <0 on error and aborts the transaction
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int ret;
	u64 delayed_start;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;
	int loops;

	/* We'll clean this up in btrfs_cleanup_transaction */
	if (trans->aborted)
		return 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);
	if (count == 0) {
		count = delayed_refs->num_entries * 2;
		run_most = 1;
	}

	if (!run_all && !run_most) {
		int old;
		int seq = atomic_read(&delayed_refs->ref_seq);

progress:
		old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
		if (old) {
			DEFINE_WAIT(__wait);
			if (delayed_refs->flushing ||
			    !btrfs_should_throttle_delayed_refs(trans, root))
				return 0;

			prepare_to_wait(&delayed_refs->wait, &__wait,
					TASK_UNINTERRUPTIBLE);

			old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
			if (old) {
				schedule();
				finish_wait(&delayed_refs->wait, &__wait);

				if (!refs_newer(delayed_refs, seq, 256))
					goto progress;
				else
					return 0;
			} else {
				finish_wait(&delayed_refs->wait, &__wait);
				goto again;
			}
		}
	} else {
		atomic_inc(&delayed_refs->procs_running_refs);
	}

again:
	loops = 0;
	spin_lock(&delayed_refs->lock);

#ifdef SCRAMBLE_DELAYED_REFS
	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif

	while (1) {
		if (!(run_all || run_most) &&
		    !btrfs_should_throttle_delayed_refs(trans, root))
			break;

		/*
		 * go find something we can process in the rbtree.  We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		delayed_start = delayed_refs->run_delayed_start;
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		ret = run_clustered_refs(trans, root, &cluster);
		if (ret < 0) {
			btrfs_release_ref_cluster(&cluster);
			spin_unlock(&delayed_refs->lock);
			btrfs_abort_transaction(trans, root, ret);
			atomic_dec(&delayed_refs->procs_running_refs);
			wake_up(&delayed_refs->wait);
			return ret;
		}

		atomic_add(ret, &delayed_refs->ref_seq);

		count -= min_t(unsigned long, ret, count);

		if (count == 0)
			break;

		if (delayed_start >= delayed_refs->run_delayed_start) {
			if (loops == 0) {
				/*
				 * btrfs_find_ref_cluster looped. let's do one
				 * more cycle. if we don't run any delayed ref
				 * during that cycle (because we can't because
				 * all of them are blocked), bail out.
				 */
				loops = 1;
			} else {
				/*
				 * no runnable refs left, stop trying
				 */
				BUG_ON(run_all);
				break;
			}
		}
		if (ret) {
			/* refs were run, let's reset staleness detection */
			loops = 0;
		}
	}

	if (run_all) {
		if (!list_empty(&trans->new_bgs)) {
			spin_unlock(&delayed_refs->lock);
			btrfs_create_pending_block_groups(trans, root);
			spin_lock(&delayed_refs->lock);
		}

		node = rb_first(&delayed_refs->root);
		if (!node)
			goto out;
		count = (unsigned long)-1;

		while (node) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;

				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);
		goto again;
	}
out:
	atomic_dec(&delayed_refs->procs_running_refs);
	smp_mb();
	if (waitqueue_active(&delayed_refs->wait))
		wake_up(&delayed_refs->wait);

	spin_unlock(&delayed_refs->lock);
	assert_qgroups_uptodate(trans);
	return 0;
}

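/*
 * Usage sketch (hypothetical caller, illustration only): transaction
 * commit wants everything processed and passes (unsigned long)-1, while a
 * throttled caller asks for a small batch instead, e.g.
 *
 *	ret = btrfs_run_delayed_refs(trans, root, 64);
 *	if (ret)
 *		return ret;
 *
 * where a non-zero return means the transaction was aborted.
 */
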
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int level, int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = btrfs_alloc_delayed_extent_op();
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;
	extent_op->level = level;

	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
					  num_bytes, extent_op);
	if (ret)
		btrfs_free_delayed_extent_op(extent_op);
	return ret;
}

static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	int ret = 0;

	ret = -ENOENT;
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	if (!mutex_trylock(&head->mutex)) {
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(path);

		/*
		 * Mutex was contended, block until it's released and let
		 * caller try again
		 */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out_unlock;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	if (ref->bytenr != bytenr)
		goto out_unlock;

	ret = 1;
	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out_unlock;

	data_ref = btrfs_delayed_node_to_data_ref(ref);

	node = rb_prev(node);
	if (node) {
		int seq = ref->seq;

		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (ref->bytenr == bytenr && ref->seq == seq)
			goto out_unlock;
	}

	if (data_ref->root != root->root_key.objectid ||
	    data_ref->objectid != objectid || data_ref->offset != offset)
		goto out_unlock;

	ret = 0;
out_unlock:
	mutex_unlock(&head->mutex);
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}

static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0); /* Corruption */

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}

int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_path *path;
	int ret;
	int ret2;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOENT;

	do {
		ret = check_committed_ref(trans, root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret2 = check_delayed_ref(trans, root, path, objectid,
					 offset, bytenr);
	} while (ret2 == -EAGAIN);

	if (ret2 && ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	if (ret != -ENOENT || ret2 != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		WARN_ON(ret > 0);
	return ret;
}

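/*
 * Usage sketch (hypothetical caller, illustration only): a nocow writer
 * checks whether anyone else could reference the extent before rewriting
 * it in place:
 *
 *	ret = btrfs_cross_ref_exist(trans, root, ino, file_offset, bytenr);
 *	if (ret)
 *		goto out_cow;
 *
 * Only a return of 0 proves the extent is referenced solely by this
 * (root, objectid, offset) triple; anything else means shared or
 * undecided.
 */
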
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc, int for_cow)
{
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64, int);

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset, for_cow);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = btrfs_level_size(root, level - 1);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0,
					   for_cow);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref, int for_cow)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref, int for_cow)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
}

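/*
 * Sketch of the intended pairing (illustration only, simplified from the
 * real COW path, which also juggles full_backref flags): when a block is
 * COWed without a full backref, the new copy takes references on
 * everything it points to and the old copy drops its own:
 *
 *	ret = btrfs_inc_ref(trans, root, cow, 0, 1);
 *	if (!ret)
 *		ret = btrfs_dec_ref(trans, root, buf, 0, 1);
 */
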
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret); /* Corruption */

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);
fail:
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}
	return 0;
}

static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
		 struct btrfs_block_group_cache *cache)
{
	struct rb_node *node;

	spin_lock(&root->fs_info->block_group_cache_lock);
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&root->fs_info->block_group_cache_lock);
	return cache;
}

static int cache_save_setup(struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path)
{
	struct btrfs_root *root = block_group->fs_info->tree_root;
	struct inode *inode = NULL;
	u64 alloc_hint = 0;
	int dcs = BTRFS_DC_ERROR;
	int num_pages = 0;
	int retries = 0;
	int ret = 0;

	/*
	 * If this block group is smaller than 100 megs don't bother caching
	 * the block group.
	 */
	if (block_group->key.offset < (100 * 1024 * 1024)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}

again:
	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		btrfs_release_path(path);
		goto out;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retries);
		retries++;

		if (block_group->ro)
			goto out_free;

		ret = create_free_space_inode(root, trans, block_group, path);
		if (ret)
			goto out_free;
		goto again;
	}

	/* We've already set up this transaction, go ahead and exit */
	if (block_group->cache_generation == trans->transid &&
	    i_size_read(inode)) {
		dcs = BTRFS_DC_SETUP;
		goto out_put;
	}

	/*
	 * We want to set the generation to 0, that way if anything goes wrong
	 * from here on out we know not to trust this cache when we load up
	 * next time.
	 */
	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	WARN_ON(ret);

	if (i_size_read(inode) > 0) {
		ret = btrfs_check_trunc_cache_free_space(root,
					&root->fs_info->global_block_rsv);
		if (ret)
			goto out_put;

		ret = btrfs_truncate_free_space_cache(root, trans, inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED ||
	    !btrfs_test_opt(root, SPACE_CACHE)) {
		/*
		 * Don't bother trying to write stuff out _if_
		 * a) we're not cached, or
		 * b) we're mounted with the nospace_cache option.
		 */
		dcs = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);

	/*
	 * Try to preallocate enough space based on how big the block group is.
	 * Keep in mind this has to include any pinned space which could end up
	 * taking up quite a bit since it's not folded into the other space
	 * cache.
	 */
	num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
	if (!num_pages)
		num_pages = 1;

	num_pages *= 16;
	num_pages *= PAGE_CACHE_SIZE;

	ret = btrfs_check_data_free_space(inode, num_pages);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
					      num_pages, num_pages,
					      &alloc_hint);
	if (!ret)
		dcs = BTRFS_DC_SETUP;
	btrfs_free_reserved_data_space(inode, num_pages);

out_put:
	iput(inode);
out_free:
	btrfs_release_path(path);
out:
	spin_lock(&block_group->lock);
	if (!ret && dcs == BTRFS_DC_SETUP)
		block_group->cache_generation = trans->transid;
	block_group->disk_cache_state = dcs;
	spin_unlock(&block_group->lock);

	return ret;
}

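/*
 * Worked example for the preallocation above: a 1GiB block group gives
 * div64_u64(1GiB, 256MiB) = 4, so 4 * 16 = 64 pages, i.e. 256KiB of space
 * cache with 4K pages. Note num_pages ends up holding bytes once it has
 * been multiplied by PAGE_CACHE_SIZE.
 */
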
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;
	int err = 0;
	struct btrfs_path *path;
	u64 last = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}
		err = cache_save_setup(cache, trans, path);
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}

	while (1) {
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			if (err) /* File system offline */
				goto out;
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
				btrfs_put_block_group(cache);
				goto again;
			}

			if (cache->dirty)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		if (cache->disk_cache_state == BTRFS_DC_SETUP)
			cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
		cache->dirty = 0;
		last = cache->key.objectid + cache->key.offset;

		err = write_one_cache_group(trans, root, path, cache);
		btrfs_put_block_group(cache);
		if (err) /* File system offline */
			goto out;
	}

	while (1) {
		/*
		 * I don't think this is needed since we're just marking our
		 * preallocated extent as written, but just in case, it can't
		 * hurt.
		 */
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			if (err) /* File system offline */
				goto out;
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			/*
			 * Really this shouldn't happen, but it could if we
			 * couldn't write the entire preallocated extent and
			 * splitting the extent resulted in a new block.
			 */
			if (cache->dirty) {
				btrfs_put_block_group(cache);
				goto again;
			}
			if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		err = btrfs_write_out_cache(root, trans, cache, path);

		/*
		 * If we didn't have an error then the cache state is still
		 * NEED_WRITE, so we can set it to WRITTEN.
		 */
		if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
			cache->disk_cache_state = BTRFS_DC_WRITTEN;
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}
out:
	btrfs_free_path(path);
	return err;
}

int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}

static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int i;
	int factor;
	int ret;

	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		     BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	found = __find_space_info(info, flags);
	if (found) {
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->disk_total += total_bytes * factor;
		found->bytes_used += bytes_used;
		found->disk_used += bytes_used * factor;
		found->full = 0;
		spin_unlock(&found->lock);
		*space_info = found;
		return 0;
	}
	found = kzalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	ret = percpu_counter_init(&found->total_bytes_pinned, 0);
	if (ret) {
		kfree(found);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&found->block_groups[i]);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	found->total_bytes = total_bytes;
	found->disk_total = total_bytes * factor;
	found->bytes_used = bytes_used;
	found->disk_used = bytes_used * factor;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = 0;
	found->bytes_may_use = 0;
	found->full = 0;
	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
	found->chunk_alloc = 0;
	found->flush = 0;
	init_waitqueue_head(&found->wait);
	*space_info = found;
	list_add_rcu(&found->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = found;
	return 0;
}

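/*
 * Example of the factor above: adding a 1GiB RAID1 block group grows
 * total_bytes by 1GiB but disk_total by 2GiB, since every byte occupies
 * two devices; single and RAID0 profiles use factor 1, so the two
 * counters move together.
 */
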
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits |= extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * returns target flags in extended format or 0 if restripe for this
 * chunk_type is not in progress
 *
 * should be called with either volume_mutex or balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Returns reduced profile in chunk format.  If profile changing is in
 * progress (either running or paused) picks the target profile (if it's
 * already available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	/*
	 * we add in the count of missing devices because we want
	 * to make sure that any RAID levels on a degraded FS
	 * continue to be honored.
	 */
	u64 num_devices = root->fs_info->fs_devices->rw_devices +
		root->fs_info->fs_devices->missing_devices;
	u64 target;
	u64 tmp;

	/*
	 * see if restripe for this chunk_type is in progress, if so
	 * try to reduce to the target profile
	 */
	spin_lock(&root->fs_info->balance_lock);
	target = get_restripe_target(root->fs_info, flags);
	if (target) {
		/* pick target profile only if it's already available */
		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
			spin_unlock(&root->fs_info->balance_lock);
			return extended_to_chunk(target);
		}
	}
	spin_unlock(&root->fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
			   BTRFS_BLOCK_GROUP_RAID5);
	if (num_devices < 3)
		flags &= ~BTRFS_BLOCK_GROUP_RAID6;
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
		       BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
		       BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
	flags &= ~tmp;

	if (tmp & BTRFS_BLOCK_GROUP_RAID6)
		tmp = BTRFS_BLOCK_GROUP_RAID6;
	else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
		tmp = BTRFS_BLOCK_GROUP_RAID5;
	else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
		tmp = BTRFS_BLOCK_GROUP_RAID10;
	else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
		tmp = BTRFS_BLOCK_GROUP_RAID1;
	else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
		tmp = BTRFS_BLOCK_GROUP_RAID0;

	return extended_to_chunk(flags | tmp);
}

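/*
 * Worked example (illustrative): flags = DATA | RAID0 | RAID1 on a
 * two-device fs with no restripe target keeps both RAID bits past the
 * masking (only RAID6 and RAID10 need more devices), and the preference
 * chain then picks RAID1 over RAID0, so the result reduces to
 * DATA | RAID1 in chunk format.
 */
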
static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
{
	unsigned seq;

	do {
		seq = read_seqbegin(&root->fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= root->fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= root->fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= root->fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&root->fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(root, flags);
}

u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
	u64 flags;
	u64 ret;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == root->fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	ret = get_alloc_profile(root, flags);
	return ret;
}

/*
 * This will check the space that the inode allocates from to make sure we have
 * enough space for bytes.
 */
int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
{
	struct btrfs_space_info *data_sinfo;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 used;
	int ret = 0, committed = 0, alloc_chunk = 1;

	/* make sure bytes are sectorsize aligned */
	bytes = ALIGN(bytes, root->sectorsize);

	if (btrfs_is_free_space_inode(inode)) {
		committed = 1;
		ASSERT(current->journal_info);
	}

	data_sinfo = fs_info->data_sinfo;
	if (!data_sinfo)
		goto alloc;

again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
		data_sinfo->bytes_may_use;

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we need
		 * to alloc a new chunk.
		 */
		if (!data_sinfo->full && alloc_chunk) {
			u64 alloc_target;

			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
			spin_unlock(&data_sinfo->lock);
alloc:
			alloc_target = btrfs_get_alloc_profile(root, 1);
			/*
			 * It is ugly that we don't call a nolock join
			 * transaction for the free space inode case here.
			 * But it is safe because we only do the data space
			 * reservation for the free space cache in the
			 * transaction context; the common join transaction
			 * just increases the counter of the current
			 * transaction handle and doesn't try to acquire the
			 * trans_lock of the fs.
			 */
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     alloc_target,
					     CHUNK_ALLOC_NO_FORCE);
			btrfs_end_transaction(trans, root);
			if (ret < 0) {
				if (ret != -ENOSPC)
					return ret;
				else
					goto commit_trans;
			}

			if (!data_sinfo)
				data_sinfo = fs_info->data_sinfo;

			goto again;
		}

		/*
		 * If we don't have enough pinned space to deal with this
		 * allocation don't bother committing the transaction.
		 */
		if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
					   bytes) < 0)
			committed = 1;
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
commit_trans:
		if (!committed &&
		    !atomic_read(&root->fs_info->open_ioctl_trans)) {
			committed = 1;
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				return ret;

			goto again;
		}

		trace_btrfs_space_reservation(root->fs_info,
					      "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	trace_btrfs_space_reservation(root->fs_info, "space_info",
				      data_sinfo->flags, bytes, 1);
	spin_unlock(&data_sinfo->lock);

	return 0;
}

/*
 * Called if we need to clear a data reservation for this inode.
 */
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_space_info *data_sinfo;

	/* make sure bytes are sectorsize aligned */
	bytes = ALIGN(bytes, root->sectorsize);

	data_sinfo = root->fs_info->data_sinfo;
	spin_lock(&data_sinfo->lock);
	WARN_ON(data_sinfo->bytes_may_use < bytes);
	data_sinfo->bytes_may_use -= bytes;
	trace_btrfs_space_reservation(root->fs_info, "space_info",
				      data_sinfo->flags, bytes, 0);
	spin_unlock(&data_sinfo->lock);
}

static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
	rcu_read_unlock();
}

static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}

static int should_alloc_chunk(struct btrfs_root *root,
			      struct btrfs_space_info *sinfo, int force)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * We need to take into account the global rsv because for all intents
	 * and purposes it's used space.  Don't worry about locking the
	 * global_rsv, it doesn't change except when the transaction commits.
	 */
	if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
		num_allocated += calc_global_rsv_need_space(global_rsv);

	/*
	 * in limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
		thresh = max_t(u64, 64 * 1024 * 1024,
			       div_factor_fine(thresh, 1));

		if (num_bytes - num_allocated < thresh)
			return 1;
	}

	if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
		return 0;
	return 1;
}

static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
{
	u64 num_dev;

	if (type & (BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_RAID0 |
		    BTRFS_BLOCK_GROUP_RAID5 |
		    BTRFS_BLOCK_GROUP_RAID6))
		num_dev = root->fs_info->fs_devices->rw_devices;
	else if (type & BTRFS_BLOCK_GROUP_RAID1)
		num_dev = 2;
	else
		num_dev = 1;	/* DUP or single */

	/* metadata for updating devices and chunk tree */
	return btrfs_calc_trans_metadata_size(root, num_dev + 1);
}

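/*
 * Example: a RAID1 chunk allocation touches two device items plus the
 * chunk tree, so the threshold is btrfs_calc_trans_metadata_size(root, 3);
 * DUP or single needs only num_dev + 1 = 2 units.
 */
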
static void check_system_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, u64 type)
{
	struct btrfs_space_info *info;
	u64 left;
	u64 thresh;

	info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	spin_lock(&info->lock);
	left = info->total_bytes - info->bytes_used - info->bytes_pinned -
		info->bytes_reserved - info->bytes_readonly;
	spin_unlock(&info->lock);

	thresh = get_system_chunk_thresh(root, type);
	if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
		btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
			left, thresh, type);
		dump_space_info(info, 0, 0);
	}

	if (left < thresh) {
		u64 flags;

		flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
		btrfs_alloc_chunk(trans, root, flags);
	}
}

static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int wait_for_alloc = 0;
	int ret = 0;

	/* Don't re-enter if we're already allocating a chunk */
	if (trans->allocating_chunk)
		return -ENOSPC;

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret); /* -ENOMEM */
	}
	BUG_ON(!space_info); /* Logic error */

again:
	spin_lock(&space_info->lock);
	if (force < space_info->force_alloc)
		force = space_info->force_alloc;
	if (space_info->full) {
		if (should_alloc_chunk(extent_root, space_info, force))
			ret = -ENOSPC;
		else
			ret = 0;
		spin_unlock(&space_info->lock);
		return ret;
	}

	if (!should_alloc_chunk(extent_root, space_info, force)) {
		spin_unlock(&space_info->lock);
		return 0;
	} else if (space_info->chunk_alloc) {
		wait_for_alloc = 1;
	} else {
		space_info->chunk_alloc = 1;
	}

	spin_unlock(&space_info->lock);

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * The chunk_mutex is held throughout the entirety of a chunk
	 * allocation, so once we've acquired the chunk_mutex we know that the
	 * other guy is done and we need to recheck and see if we should
	 * allocate.
	 */
	if (wait_for_alloc) {
		mutex_unlock(&fs_info->chunk_mutex);
		wait_for_alloc = 0;
		goto again;
	}

	trans->allocating_chunk = true;

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	/*
	 * Check if we have enough space in SYSTEM chunk because we may need
	 * to update devices.
	 */
	check_system_chunk(trans, extent_root, flags);

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
	trans->allocating_chunk = false;

	spin_lock(&space_info->lock);
	if (ret < 0 && ret != -ENOSPC)
		goto out;
	if (ret)
		space_info->full = 1;
	else
		ret = 1;

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&fs_info->chunk_mutex);
	return ret;
}

static int can_overcommit(struct btrfs_root *root,
			  struct btrfs_space_info *space_info, u64 bytes,
			  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	u64 profile = btrfs_get_alloc_profile(root, 0);
	u64 space_size;
	u64 avail;
	u64 used;

	used = space_info->bytes_used + space_info->bytes_reserved +
		space_info->bytes_pinned + space_info->bytes_readonly;

	/*
	 * We only want to allow over committing if we have lots of actual space
	 * free, but if we don't have enough space to handle the global reserve
	 * space then we could end up having a real enospc problem when trying
	 * to allocate a chunk or some other such important allocation.
	 */
	spin_lock(&global_rsv->lock);
	space_size = calc_global_rsv_need_space(global_rsv);
	spin_unlock(&global_rsv->lock);
	if (used + space_size >= space_info->total_bytes)
		return 0;

	used += space_info->bytes_may_use;

	spin_lock(&root->fs_info->free_chunk_lock);
	avail = root->fs_info->free_chunk_space;
	spin_unlock(&root->fs_info->free_chunk_lock);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math
	 */
	if (profile & (BTRFS_BLOCK_GROUP_DUP |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID10))
		avail >>= 1;

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space.  If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}

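/*
 * Worked example (illustrative numbers): with 10GiB of unallocated chunk
 * space and a RAID1 metadata profile, avail halves to 5GiB; a FLUSH_ALL
 * caller may then overcommit by 5GiB >> 3 = 640MiB beyond total_bytes,
 * while the other flush modes are allowed 5GiB >> 1 = 2.5GiB.
 */
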
static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
					 unsigned long nr_pages)
{
	struct super_block *sb = root->fs_info->sb;

	if (down_read_trylock(&sb->s_umount)) {
		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
		up_read(&sb->s_umount);
	} else {
		/*
		 * We needn't worry about the filesystem going from r/w to r/o
		 * even though we don't acquire the ->s_umount mutex, because
		 * the filesystem should guarantee that the delalloc inode
		 * list is empty after the filesystem becomes read-only (all
		 * dirty pages have been written to disk).
		 */
		btrfs_start_all_delalloc_inodes(root->fs_info, 0);
		if (!current->journal_info)
			btrfs_wait_all_ordered_extents(root->fs_info);
	}
}

/*
 * shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
			    bool wait_ordered)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 max_reclaim;
	long time_left;
	unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
	int loops = 0;
	enum btrfs_reserve_flush_enum flush;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	block_rsv = &root->fs_info->delalloc_block_rsv;
	space_info = block_rsv->space_info;

	smp_mb();
	delalloc_bytes = percpu_counter_sum_positive(
						&root->fs_info->delalloc_bytes);
	if (delalloc_bytes == 0) {
		if (trans)
			return;
		btrfs_wait_all_ordered_extents(root->fs_info);
		return;
	}

	while (delalloc_bytes && loops < 3) {
		max_reclaim = min(delalloc_bytes, to_reclaim);
		nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
		btrfs_writeback_inodes_sb_nr(root, nr_pages);
		/*
		 * We need to wait for the async pages to actually start before
		 * we do anything.
		 */
		wait_event(root->fs_info->async_submit_wait,
			   !atomic_read(&root->fs_info->async_delalloc_pages));

		if (!trans)
			flush = BTRFS_RESERVE_FLUSH_ALL;
		else
			flush = BTRFS_RESERVE_NO_FLUSH;
		spin_lock(&space_info->lock);
		if (can_overcommit(root, space_info, orig, flush)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_all_ordered_extents(root->fs_info);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}
		smp_mb();
		delalloc_bytes = percpu_counter_sum_positive(
						&root->fs_info->delalloc_bytes);
	}
}

/**
 * may_commit_transaction - possibly commit the transaction if it's ok to
 * @root - the root we're allocating for
 * @bytes - the number of bytes we want to reserve
 * @force - force the commit
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does.  Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_root *root,
				  struct btrfs_space_info *space_info,
				  u64 bytes, int force)
{
	struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
	struct btrfs_trans_handle *trans;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	if (force)
		goto commit;

	/* See if there is enough pinned space to make this reservation */
	spin_lock(&space_info->lock);
	if (percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes) >= 0) {
		spin_unlock(&space_info->lock);
		goto commit;
	}
	spin_unlock(&space_info->lock);

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		return -ENOSPC;

	spin_lock(&space_info->lock);
	spin_lock(&delayed_rsv->lock);
	if (percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes - delayed_rsv->size) >= 0) {
		spin_unlock(&delayed_rsv->lock);
		spin_unlock(&space_info->lock);
		return -ENOSPC;
	}
	spin_unlock(&delayed_rsv->lock);
	spin_unlock(&space_info->lock);

commit:
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return -ENOSPC;

	return btrfs_commit_transaction(trans, root);
}

enum flush_state {
	FLUSH_DELAYED_ITEMS_NR	= 1,
	FLUSH_DELAYED_ITEMS	= 2,
	FLUSH_DELALLOC		= 3,
	FLUSH_DELALLOC_WAIT	= 4,
	ALLOC_CHUNK		= 5,
	COMMIT_TRANS		= 6,
};

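/*
 * Escalation sketch (illustration only): reserve_metadata_bytes() below
 * walks these states in order, cheapest first, retrying the reservation
 * after each step, roughly
 *
 *	int state = FLUSH_DELAYED_ITEMS_NR;
 *
 *	while (ret == -ENOSPC && state <= COMMIT_TRANS)
 *		ret = flush_space(root, space_info, to_reclaim,
 *				  orig_bytes, state++);
 *
 * and only committing the transaction as a last resort.
 */
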
static int flush_space(struct btrfs_root *root,
		       struct btrfs_space_info *space_info, u64 num_bytes,
		       u64 orig_bytes, int state)
{
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR) {
			u64 bytes = btrfs_calc_trans_metadata_size(root, 1);

			nr = (int)div64_u64(num_bytes, bytes);
			if (!nr)
				nr = 1;
			nr *= 2;
		} else {
			nr = -1;
		}
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, root, nr);
		btrfs_end_transaction(trans, root);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(root, num_bytes, orig_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case ALLOC_CHUNK:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     btrfs_get_alloc_profile(root, 0),
				     CHUNK_ALLOC_NO_FORCE);
		btrfs_end_transaction(trans, root);
		if (ret == -ENOSPC)
			ret = 0;
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(root, space_info, orig_bytes, 0);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	return ret;
}

  3727. /**
  3728. * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
  3729. * @root - the root we're allocating for
  3730. * @block_rsv - the block_rsv we're allocating for
  3731. * @orig_bytes - the number of bytes we want
  3732. * @flush - whether or not we can flush to make our reservation
  3733. *
  3734. * This will reserve orgi_bytes number of bytes from the space info associated
  3735. * with the block_rsv. If there is not enough space it will make an attempt to
  3736. * flush out space to make room. It will do this by flushing delalloc if
  3737. * possible or committing the transaction. If flush is 0 then no attempts to
  3738. * regain reservations will be made and this will fail if there is not enough
  3739. * space already.
  3740. */
static int reserve_metadata_bytes(struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 orig_bytes,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 used;
	u64 num_bytes = orig_bytes;
	int flush_state = FLUSH_DELAYED_ITEMS_NR;
	int ret = 0;
	bool flushing = false;

again:
	ret = 0;
	spin_lock(&space_info->lock);
	/*
	 * We only want to wait if somebody other than us is flushing and we
	 * are actually allowed to flush all things.
	 */
	while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
	       space_info->flush) {
		spin_unlock(&space_info->lock);
		/*
		 * If we have a trans handle we can't wait because the flusher
		 * may have to commit the transaction, which would mean we would
		 * deadlock since we are waiting for the flusher to finish, but
		 * hold the current transaction open.
		 */
		if (current->journal_info)
			return -EAGAIN;
		ret = wait_event_killable(space_info->wait, !space_info->flush);
		/* Must have been killed, return */
		if (ret)
			return -EINTR;

		spin_lock(&space_info->lock);
	}

	ret = -ENOSPC;
	used = space_info->bytes_used + space_info->bytes_reserved +
		space_info->bytes_pinned + space_info->bytes_readonly +
		space_info->bytes_may_use;

	/*
	 * The idea here is that if we've not already over-reserved the space
	 * then we can go ahead and save our reservation first and then start
	 * flushing if we need to.  Otherwise, if we've already overcommitted,
	 * let's start flushing stuff first and then come back and try to make
	 * our reservation.
	 */
	if (used <= space_info->total_bytes) {
		if (used + orig_bytes <= space_info->total_bytes) {
			space_info->bytes_may_use += orig_bytes;
			trace_btrfs_space_reservation(root->fs_info,
				"space_info", space_info->flags, orig_bytes, 1);
			ret = 0;
		} else {
			/*
			 * Ok set num_bytes to orig_bytes since we aren't
			 * overcommitted, this way we only try and reclaim what
			 * we need.
			 */
			num_bytes = orig_bytes;
		}
	} else {
		/*
		 * Ok we're over committed, set num_bytes to the overcommitted
		 * amount plus the amount of bytes that we need for this
		 * reservation.
		 */
		num_bytes = used - space_info->total_bytes +
			(orig_bytes * 2);
	}

	if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
		space_info->bytes_may_use += orig_bytes;
		trace_btrfs_space_reservation(root->fs_info, "space_info",
					      space_info->flags, orig_bytes,
					      1);
		ret = 0;
	}

	/*
	 * Couldn't make our reservation, save our place so while we're trying
	 * to reclaim space we can actually use it instead of somebody else
	 * stealing it from us.
	 *
	 * We make the other tasks wait for the flush only when we can flush
	 * all things.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		flushing = true;
		space_info->flush = 1;
	}

	spin_unlock(&space_info->lock);

	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		goto out;

	ret = flush_space(root, space_info, num_bytes, orig_bytes,
			  flush_state);
	flush_state++;

	/*
	 * If we are FLUSH_LIMIT, we can not flush delalloc, or the deadlock
	 * would happen.  So skip delalloc flush.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
	    (flush_state == FLUSH_DELALLOC ||
	     flush_state == FLUSH_DELALLOC_WAIT))
		flush_state = ALLOC_CHUNK;

	if (!ret)
		goto again;
	else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
		 flush_state < COMMIT_TRANS)
		goto again;
	else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
		 flush_state <= COMMIT_TRANS)
		goto again;

out:
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		struct btrfs_block_rsv *global_rsv =
			&root->fs_info->global_block_rsv;

		if (block_rsv != global_rsv &&
		    !block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC)
		trace_btrfs_space_reservation(root->fs_info,
					      "space_info:enospc",
					      space_info->flags, orig_bytes, 1);
	if (flushing) {
		spin_lock(&space_info->lock);
		space_info->flush = 0;
		wake_up_all(&space_info->wait);
		spin_unlock(&space_info->lock);
	}
	return ret;
}
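
/*
 * Editor's sketch (not part of the original file): how the three flush
 * modes above behave for a caller wanting "bytes" of metadata space:
 *
 *	reserve_metadata_bytes(root, rsv, bytes, BTRFS_RESERVE_NO_FLUSH);
 *		only succeeds if the space is already available
 *	reserve_metadata_bytes(root, rsv, bytes, BTRFS_RESERVE_FLUSH_LIMIT);
 *		flushes delayed items and may allocate a chunk, but skips
 *		delalloc and stops short of COMMIT_TRANS
 *	reserve_metadata_bytes(root, rsv, bytes, BTRFS_RESERVE_FLUSH_ALL);
 *		escalates through every flush state, including a
 *		transaction commit, before returning -ENOSPC
 */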
static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv = NULL;

	if (root->ref_cows)
		block_rsv = trans->block_rsv;

	if (root == root->fs_info->csum_root && trans->adding_csums)
		block_rsv = trans->block_rsv;

	if (root == root->fs_info->uuid_root)
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &root->fs_info->empty_block_rsv;

	return block_rsv;
}
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes)
{
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = 0;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, int update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = 1;
	spin_unlock(&block_rsv->lock);
}
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *dest, u64 num_bytes,
			     int min_factor)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != dest->space_info)
		return -ENOSPC;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, min_factor);
	if (global_rsv->reserved < min_bytes + num_bytes) {
		spin_unlock(&global_rsv->lock);
		return -ENOSPC;
	}
	global_rsv->reserved -= num_bytes;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	block_rsv_add_bytes(dest, num_bytes, 1);
	return 0;
}
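
/*
 * Worked example (editor's illustration, not part of the original file):
 * div_factor(num, f) computes num * f / 10, so with a 100MiB global rsv
 * and min_factor == 5:
 *
 *	min_bytes = div_factor(100MiB, 5);	(50MiB)
 *
 * Migrating 8MiB to "dest" then only succeeds if the global rsv currently
 * has at least 58MiB reserved, i.e. this helper never drains the global
 * reserve below half of its target size.
 */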
static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_rsv *block_rsv,
				    struct btrfs_block_rsv *dest, u64 num_bytes)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1)
		num_bytes = block_rsv->size;
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	} else {
		num_bytes = 0;
	}
	spin_unlock(&block_rsv->lock);

	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = 1;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes) {
			spin_lock(&space_info->lock);
			space_info->bytes_may_use -= num_bytes;
			trace_btrfs_space_reservation(fs_info, "space_info",
					space_info->flags, num_bytes, 0);
			spin_unlock(&space_info->lock);
		}
	}
}

static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
				   struct btrfs_block_rsv *dst, u64 num_bytes)
{
	int ret;

	ret = block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	block_rsv_add_bytes(dst, num_bytes, 1);
	return 0;
}
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	rsv->type = type;
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
					      unsigned short type)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_block_rsv(block_rsv, type);
	block_rsv->space_info = __find_space_info(fs_info,
						  BTRFS_BLOCK_GROUP_METADATA);
	return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_root *root,
			  struct btrfs_block_rsv *rsv)
{
	if (!rsv)
		return;
	btrfs_block_rsv_release(root, rsv, (u64)-1);
	kfree(rsv);
}
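
/*
 * Editor's sketch (not part of the original file): typical lifecycle of a
 * private reservation; BTRFS_BLOCK_RSV_TEMP is an existing rsv type, the
 * item count is illustrative.
 *
 *	struct btrfs_block_rsv *rsv;
 *	u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
 *
 *	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *	ret = btrfs_block_rsv_add(root, rsv, bytes, BTRFS_RESERVE_FLUSH_ALL);
 *	...consume the space...
 *	btrfs_free_block_rsv(root, rsv);	(releases any remainder)
 */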
int btrfs_block_rsv_add(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 1);
		return 0;
	}

	return ret;
}

int btrfs_block_rsv_check(struct btrfs_root *root,
			  struct btrfs_block_rsv *block_rsv, int min_factor)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = div_factor(block_rsv->size, min_factor);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}
int btrfs_block_rsv_refill(struct btrfs_root *root,
			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
			   enum btrfs_reserve_flush_enum flush)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = min_reserved;
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 0);
		return 0;
	}

	return ret;
}

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv,
			    u64 num_bytes)
{
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_block_rsv_release(struct btrfs_root *root,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;

	if (global_rsv->full || global_rsv == block_rsv ||
	    block_rsv->space_info != global_rsv->space_info)
		global_rsv = NULL;
	block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
				num_bytes);
}
/*
 * helper to calculate size of global block reservation.
 * the desired value is sum of space used by extent tree,
 * checksum tree and root tree
 */
static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *sinfo;
	u64 num_bytes;
	u64 meta_used;
	u64 data_used;
	int csum_size = btrfs_super_csum_size(fs_info->super_copy);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
	spin_lock(&sinfo->lock);
	data_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	spin_lock(&sinfo->lock);
	if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
		data_used = 0;
	meta_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
		    csum_size * 2;
	num_bytes += div64_u64(data_used + meta_used, 50);

	if (num_bytes * 3 > meta_used)
		num_bytes = div64_u64(meta_used, 3);

	return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
}
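
/*
 * Worked example (editor's illustration, not part of the original file):
 * assume 4KiB blocks, 4 byte crc32c checksums, data_used = 100GiB and
 * meta_used = 10GiB:
 *
 *	csum component: (100GiB >> 12) * 4 * 2	~= 200MiB
 *	global overhead: (100GiB + 10GiB) / 50	~= 2.2GiB
 *
 * The ~2.4GiB total, tripled, is 7.2GiB, which does not exceed meta_used,
 * so the meta_used / 3 cap does not apply; update_global_block_rsv()
 * below then clamps the final size to 512MiB anyway.
 */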
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	u64 num_bytes;

	num_bytes = calc_global_metadata_size(fs_info);

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);

	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
		    sinfo->bytes_reserved + sinfo->bytes_readonly +
		    sinfo->bytes_may_use;

	if (sinfo->total_bytes > num_bytes) {
		num_bytes = sinfo->total_bytes - num_bytes;
		block_rsv->reserved += num_bytes;
		sinfo->bytes_may_use += num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      sinfo->flags, num_bytes, 1);
	}

	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		sinfo->bytes_may_use -= num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      sinfo->flags, num_bytes, 0);
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	}

	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}
static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->delalloc_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	if (fs_info->quota_root)
		fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	update_global_block_rsv(fs_info);
}

static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
				(u64)-1);
	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
}
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (!trans->block_rsv)
		return;

	if (!trans->bytes_reserved)
		return;

	trace_btrfs_space_reservation(root->fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
	trans->bytes_reserved = 0;
}

/* Can only return 0 or -ENOSPC */
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * We need to hold space in order to delete our orphan item once we've
	 * added it, so this takes the reservation so we can release it later
	 * when we are truly done with the orphan item.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 1);
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_orphan_release_metadata(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 0);
	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}
/*
 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
 * root: the root of the parent directory
 * rsv: block reservation
 * items: the number of items that we need to reserve for
 * qgroup_reserved: used to return the reserved size in qgroup
 *
 * This function is used to reserve the space for snapshot/subvolume
 * creation and deletion.  Those operations are different from the
 * common file/directory operations: they change two fs/file trees
 * and the root tree, and the number of items that the qgroup reserves
 * is different from the free space reservation.  So we can not use
 * the space reservation mechanism in start_transaction().
 */
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv,
				     int items,
				     u64 *qgroup_reserved,
				     bool use_global_rsv)
{
	u64 num_bytes;
	int ret;
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;

	if (root->fs_info->quota_enabled) {
		/* One for parent inode, two for dir entries */
		num_bytes = 3 * root->leafsize;
		ret = btrfs_qgroup_reserve(root, num_bytes);
		if (ret)
			return ret;
	} else {
		num_bytes = 0;
	}

	*qgroup_reserved = num_bytes;

	num_bytes = btrfs_calc_trans_metadata_size(root, items);
	rsv->space_info = __find_space_info(root->fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
				  BTRFS_RESERVE_FLUSH_ALL);

	if (ret == -ENOSPC && use_global_rsv)
		ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);

	if (ret) {
		if (*qgroup_reserved)
			btrfs_qgroup_free(root, *qgroup_reserved);
	}

	return ret;
}

void btrfs_subvolume_release_metadata(struct btrfs_root *root,
				      struct btrfs_block_rsv *rsv,
				      u64 qgroup_reserved)
{
	btrfs_block_rsv_release(root, rsv, (u64)-1);
	if (qgroup_reserved)
		btrfs_qgroup_free(root, qgroup_reserved);
}
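
/*
 * Editor's sketch (not part of the original file): a snapshot creation
 * path might pair the two helpers above like this; "pending" and the
 * item count are hypothetical.
 *
 *	u64 qgroup_reserved;
 *
 *	btrfs_init_block_rsv(&pending->block_rsv, BTRFS_BLOCK_RSV_TEMP);
 *	ret = btrfs_subvolume_reserve_metadata(root, &pending->block_rsv,
 *					       8, &qgroup_reserved, false);
 *	if (ret)
 *		return ret;
 *	...create the snapshot items...
 *	btrfs_subvolume_release_metadata(root, &pending->block_rsv,
 *					 qgroup_reserved);
 */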
/**
 * drop_outstanding_extent - drop an outstanding extent
 * @inode: the inode we're dropping the extent for
 *
 * This is called when we are freeing up an outstanding extent, either called
 * after an error or after an extent is written.  This will return the number
 * of reserved extents that need to be freed.  This must be called with
 * BTRFS_I(inode)->lock held.
 */
static unsigned drop_outstanding_extent(struct inode *inode)
{
	unsigned drop_inode_space = 0;
	unsigned dropped_extents = 0;

	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
	BTRFS_I(inode)->outstanding_extents--;

	if (BTRFS_I(inode)->outstanding_extents == 0 &&
	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			       &BTRFS_I(inode)->runtime_flags))
		drop_inode_space = 1;

	/*
	 * If we have as many or more outstanding extents than we have
	 * reserved, we need to leave the reserved extents count alone.
	 */
	if (BTRFS_I(inode)->outstanding_extents >=
	    BTRFS_I(inode)->reserved_extents)
		return drop_inode_space;

	dropped_extents = BTRFS_I(inode)->reserved_extents -
		BTRFS_I(inode)->outstanding_extents;
	BTRFS_I(inode)->reserved_extents -= dropped_extents;
	return dropped_extents + drop_inode_space;
}
/**
 * calc_csum_metadata_size - return the amount of metadata space that must be
 *	reserved/freed for the given bytes.
 * @inode: the inode we're manipulating
 * @num_bytes: the number of bytes in question
 * @reserve: 1 if we are reserving space, 0 if we are freeing space
 *
 * This adjusts the number of csum_bytes in the inode and then returns the
 * correct amount of metadata that must either be reserved or freed.  We
 * calculate how many checksums we can fit into one leaf and then divide the
 * number of bytes that will need to be checksummed by this value to figure
 * out how many checksums will be required.  If we are adding bytes then the
 * number may go up and we will return the number of additional bytes that
 * must be reserved.  If it is going down we will return the number of bytes
 * that must be freed.
 *
 * This must be called with BTRFS_I(inode)->lock held.
 */
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
				   int reserve)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 csum_size;
	int num_csums_per_leaf;
	int num_csums;
	int old_csums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
	    BTRFS_I(inode)->csum_bytes == 0)
		return 0;

	old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	if (reserve)
		BTRFS_I(inode)->csum_bytes += num_bytes;
	else
		BTRFS_I(inode)->csum_bytes -= num_bytes;
	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
	num_csums_per_leaf = (int)div64_u64(csum_size,
					    sizeof(struct btrfs_csum_item) +
					    sizeof(struct btrfs_disk_key));
	num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	num_csums = num_csums + num_csums_per_leaf - 1;
	num_csums = num_csums / num_csums_per_leaf;

	old_csums = old_csums + num_csums_per_leaf - 1;
	old_csums = old_csums / num_csums_per_leaf;

	/* No change, no need to reserve more */
	if (old_csums == num_csums)
		return 0;

	if (reserve)
		return btrfs_calc_trans_metadata_size(root,
						      num_csums - old_csums);

	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
}
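
/*
 * Worked example (editor's illustration, not part of the original file;
 * the per-leaf capacity is a made-up round number): with 4KiB sectors and
 * num_csums_per_leaf == 250, growing csum_bytes from 0 to 1MiB means
 * going from 0 csums to 256, i.e. from 0 to ceil(256 / 250) == 2 leaves,
 * so btrfs_calc_trans_metadata_size(root, 2) bytes must be reserved.
 */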
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
	u64 to_reserve = 0;
	u64 csum_bytes;
	unsigned nr_extents = 0;
	int extra_reserve = 0;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
	int ret = 0;
	bool delalloc_lock = true;
	u64 to_free = 0;
	unsigned dropped;

	/* If we are a free space inode we need to not flush since we will be in
	 * the middle of a transaction commit.  We also don't need the delalloc
	 * mutex since we won't race with anybody.  We need this mostly to make
	 * lockdep shut its filthy mouth.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		flush = BTRFS_RESERVE_NO_FLUSH;
		delalloc_lock = false;
	}

	if (flush != BTRFS_RESERVE_NO_FLUSH &&
	    btrfs_transaction_in_commit(root->fs_info))
		schedule_timeout(1);

	if (delalloc_lock)
		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);

	num_bytes = ALIGN(num_bytes, root->sectorsize);

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;

	if (BTRFS_I(inode)->outstanding_extents >
	    BTRFS_I(inode)->reserved_extents)
		nr_extents = BTRFS_I(inode)->outstanding_extents -
			BTRFS_I(inode)->reserved_extents;

	/*
	 * Add an item to reserve for updating the inode when we complete the
	 * delalloc io.
	 */
	if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
		      &BTRFS_I(inode)->runtime_flags)) {
		nr_extents++;
		extra_reserve = 1;
	}

	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
	csum_bytes = BTRFS_I(inode)->csum_bytes;
	spin_unlock(&BTRFS_I(inode)->lock);

	if (root->fs_info->quota_enabled) {
		ret = btrfs_qgroup_reserve(root, num_bytes +
					   nr_extents * root->leafsize);
		if (ret)
			goto out_fail;
	}

	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
	if (unlikely(ret)) {
		if (root->fs_info->quota_enabled)
			btrfs_qgroup_free(root, num_bytes +
						nr_extents * root->leafsize);
		goto out_fail;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	if (extra_reserve) {
		set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			&BTRFS_I(inode)->runtime_flags);
		nr_extents--;
	}
	BTRFS_I(inode)->reserved_extents += nr_extents;
	spin_unlock(&BTRFS_I(inode)->lock);

	if (delalloc_lock)
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);

	if (to_reserve)
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), to_reserve, 1);
	block_rsv_add_bytes(block_rsv, to_reserve, 1);

	return 0;

out_fail:
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode);
	/*
	 * If the inode's csum_bytes is the same as the original
	 * csum_bytes then we know we haven't raced with any free()ers
	 * so we can just reduce our inode's csum bytes and carry on.
	 */
	if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
		calc_csum_metadata_size(inode, num_bytes, 0);
	} else {
		u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
		u64 bytes;

		/*
		 * This is tricky, but first we need to figure out how much we
		 * freed from any free-ers that occurred during this
		 * reservation, so we reset ->csum_bytes to the csum_bytes
		 * before we dropped our lock, and then call the free for the
		 * number of bytes that were freed while we were trying our
		 * reservation.
		 */
		bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
		BTRFS_I(inode)->csum_bytes = csum_bytes;
		to_free = calc_csum_metadata_size(inode, bytes, 0);

		/*
		 * Now we need to see how much we would have freed had we not
		 * been making this reservation and our ->csum_bytes were not
		 * artificially inflated.
		 */
		BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
		bytes = csum_bytes - orig_csum_bytes;
		bytes = calc_csum_metadata_size(inode, bytes, 0);

		/*
		 * Now reset ->csum_bytes to what it should be.  If bytes is
		 * more than to_free then we would have freed more space had we
		 * not had an artificially high ->csum_bytes, so we need to
		 * free the remainder.  If bytes is the same or less then we
		 * don't need to do anything, the other free-ers did the
		 * correct thing.
		 */
		BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
		if (bytes > to_free)
			to_free = bytes - to_free;
		else
			to_free = 0;
	}
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	if (to_free) {
		btrfs_block_rsv_release(root, block_rsv, to_free);
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), to_free, 0);
	}
	if (delalloc_lock)
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
	return ret;
}
/**
 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
 * @inode: the inode to release the reservation for
 * @num_bytes: the number of bytes we're releasing
 *
 * This will release the metadata reservation for an inode.  This can be called
 * once we complete IO for a given set of bytes to release their metadata
 * reservations.
 */
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 to_free = 0;
	unsigned dropped;

	num_bytes = ALIGN(num_bytes, root->sectorsize);
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode);

	if (num_bytes)
		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped > 0)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	trace_btrfs_space_reservation(root->fs_info, "delalloc",
				      btrfs_ino(inode), to_free, 0);
	if (root->fs_info->quota_enabled) {
		btrfs_qgroup_free(root, num_bytes +
					dropped * root->leafsize);
	}

	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
				to_free);
}
/**
 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
 * @inode: inode we're writing to
 * @num_bytes: the number of bytes we want to allocate
 *
 * This will do the following things
 *
 * o reserve space in the data space info for num_bytes
 * o reserve space in the metadata space info based on number of outstanding
 *   extents and how much csums will be needed
 * o add to the inodes ->delalloc_bytes
 * o add it to the fs_info's delalloc inodes list.
 *
 * This will return 0 for success and -ENOSPC if there is no space left.
 */
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, num_bytes);
	if (ret)
		return ret;

	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
	if (ret) {
		btrfs_free_reserved_data_space(inode, num_bytes);
		return ret;
	}

	return 0;
}
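
/*
 * Editor's sketch (not part of the original file): a buffered write path
 * pairs the reserve/release helpers around page preparation;
 * "prepare_pages" stands in for whatever work can fail after the
 * reservation has been taken.
 *
 *	ret = btrfs_delalloc_reserve_space(inode, write_bytes);
 *	if (ret)
 *		return ret;
 *	ret = prepare_pages(...);
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, write_bytes);
 */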
/**
 * btrfs_delalloc_release_space - release data and metadata space for delalloc
 * @inode: inode we're releasing space for
 * @num_bytes: the number of bytes we want to free up
 *
 * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
 * called in the case that we don't need the metadata AND data reservations
 * anymore, for example if there is an error or we insert an inline extent.
 *
 * This function will release the metadata space that was not used and will
 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
 * list if there are no delalloc bytes left.
 */
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
{
	btrfs_delalloc_release_metadata(inode, num_bytes);
	btrfs_free_reserved_data_space(inode, num_bytes);
}
static int update_block_group(struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;

	/* block accounting for super block */
	spin_lock(&info->delalloc_root_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_root_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -ENOENT;
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space.  This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
			cache_block_group(cache, 1);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_test_opt(root, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		cache->dirty = 1;
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}
		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}
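
/*
 * Editor's note (illustrative, not part of the original file): "factor"
 * above accounts for on-disk redundancy.  Allocating a 1MiB extent in a
 * RAID1 block group moves bytes_used by 1MiB but disk_used by 2MiB,
 * since every byte is mirrored:
 *
 *	disk_used += num_bytes * factor;	(2MiB on disk per 1MiB used)
 */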
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	struct btrfs_block_group_cache *cache;
	u64 bytenr;

	spin_lock(&root->fs_info->block_group_cache_lock);
	bytenr = root->fs_info->first_logical_byte;
	spin_unlock(&root->fs_info->block_group_cache_lock);

	if (bytenr < (u64)-1)
		return bytenr;

	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->key.objectid;
	btrfs_put_block_group(cache);

	return bytenr;
}
static int pin_down_extent(struct btrfs_root *root,
			   struct btrfs_block_group_cache *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	if (reserved)
		trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	BUG_ON(!cache); /* Logic error */

	pin_down_extent(root, cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
				    u64 bytenr, u64 num_bytes)
{
	struct btrfs_block_group_cache *cache;
	int ret;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!cache)
		return -EINVAL;

	/*
	 * pull in the free space cache (if any) so that our pin
	 * removes the free space from the cache.  We have load_only set
	 * to one because the slow code to read in the free extents does check
	 * the pinned extents.
	 */
	cache_block_group(cache, 1);

	pin_down_extent(root, cache, bytenr, num_bytes, 0);

	/* remove us from the free space cache (if we're there at all) */
	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
	btrfs_put_block_group(cache);
	return ret;
}
static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;

	block_group = btrfs_lookup_block_group(root->fs_info, start);
	if (!block_group)
		return -EINVAL;

	cache_block_group(block_group, 0);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		/* Logic error */
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = add_excluded_extent(root, start, num_bytes);
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			if (ret)
				goto out_lock;

			num_bytes = (start + num_bytes) -
				caching_ctl->progress;
			start = caching_ctl->progress;
			ret = add_excluded_extent(root, start, num_bytes);
		}
out_lock:
		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}
	btrfs_put_block_group(block_group);
	return ret;
}

int btrfs_exclude_logged_extents(struct btrfs_root *log,
				 struct extent_buffer *eb)
{
	struct btrfs_file_extent_item *item;
	struct btrfs_key key;
	int found_type;
	int i;

	if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
		return 0;

	for (i = 0; i < btrfs_header_nritems(eb); i++) {
		btrfs_item_key_to_cpu(eb, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(eb, item);
		if (found_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			continue;
		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		__exclude_logged_extent(log, key.objectid, key.offset);
	}

	return 0;
}
/**
 * btrfs_update_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @num_bytes:	The number of bytes in question
 * @reserve:	One of the reservation enums
 *
 * This is called by the allocator when it reserves space, or by somebody who is
 * freeing space that was never actually used on disk.  For example if you
 * reserve some space for a new leaf in transaction A and before transaction A
 * commits you free that leaf, you call this with reserve set to 0 in order to
 * clear the reservation.
 *
 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
 * ENOSPC accounting.  For data we handle the reservation through clearing the
 * delalloc bits in the io_tree.  We have to do this since we could end up
 * allocating less disk space for the amount of data we have reserved in the
 * case of compression.
 *
 * If this is a reservation and the block group has become read only we cannot
 * make the reservation and return -EAGAIN, otherwise this function always
 * succeeds.
 */
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (reserve != RESERVE_FREE) {
		if (cache->ro) {
			ret = -EAGAIN;
		} else {
			cache->reserved += num_bytes;
			space_info->bytes_reserved += num_bytes;
			if (reserve == RESERVE_ALLOC) {
				trace_btrfs_space_reservation(cache->fs_info,
						"space_info", space_info->flags,
						num_bytes, 0);
				space_info->bytes_may_use -= num_bytes;
			}
		}
	} else {
		if (cache->ro)
			space_info->bytes_readonly += num_bytes;
		cache->reserved -= num_bytes;
		space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}
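
/*
 * Editor's sketch (not part of the original file): an allocation that is
 * abandoned before the transaction commits pairs the enums like this, as
 * btrfs_free_tree_block() further down does for clean buffers:
 *
 *	btrfs_update_reserved_bytes(cache, len, RESERVE_ALLOC);
 *	...change of plan, the extent is never written...
 *	btrfs_add_free_space(cache, start, len);
 *	btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
 */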
void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;
	struct btrfs_space_info *space_info;

	down_write(&fs_info->extent_commit_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->extent_commit_sem);

	list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
		percpu_counter_set(&space_info->total_bytes_pinned, 0);

	update_global_block_rsv(fs_info);
}
static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 len;
	bool readonly;

	while (start <= end) {
		readonly = false;
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache); /* Logic error */
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			btrfs_add_free_space(cache, start, len);
		}

		start += len;
		space_info = cache->space_info;

		spin_lock(&space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		space_info->bytes_pinned -= len;
		if (cache->ro) {
			space_info->bytes_readonly += len;
			readonly = true;
		}
		spin_unlock(&cache->lock);
		if (!readonly && global_rsv->space_info == space_info) {
			spin_lock(&global_rsv->lock);
			if (!global_rsv->full) {
				len = min(len, global_rsv->size -
					  global_rsv->reserved);
				global_rsv->reserved += len;
				space_info->bytes_may_use += len;
				if (global_rsv->reserved >= global_rsv->size)
					global_rsv->full = 1;
			}
			spin_unlock(&global_rsv->lock);
		}
		spin_unlock(&space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;

	if (trans->aborted)
		return 0;

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			break;

		if (btrfs_test_opt(root, DISCARD))
			ret = btrfs_discard_extent(root, start,
						   end + 1 - start, NULL);

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		unpin_extent_range(root, start, end);
		cond_resched();
	}

	return 0;
}
static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
			     u64 owner, u64 root_objectid)
{
	struct btrfs_space_info *space_info;
	u64 flags;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
			flags = BTRFS_BLOCK_GROUP_SYSTEM;
		else
			flags = BTRFS_BLOCK_GROUP_METADATA;
	} else {
		flags = BTRFS_BLOCK_GROUP_DATA;
	}

	space_info = __find_space_info(fs_info, flags);
	BUG_ON(!space_info); /* Logic bug */
	percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
}
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	if (is_data)
		skinny_metadata = 0;

	ret = lookup_extent_backref(trans, extent_root, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (key.type == BTRFS_METADATA_ITEM_KEY &&
			    key.offset == owner_objectid) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
		if (!found_extent) {
			BUG_ON(iref);
			ret = remove_extent_backref(trans, extent_root, path,
						    NULL, refs_to_drop,
						    is_data);
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
			btrfs_release_path(path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			if (!is_data && skinny_metadata) {
				key.type = BTRFS_METADATA_ITEM_KEY;
				key.offset = owner_objectid;
			}

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret > 0 && skinny_metadata && path->slots[0]) {
				/*
				 * Couldn't find our skinny metadata item,
				 * see if we have ye olde extent item.
				 */
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == num_bytes)
					ret = 0;
			}

			if (ret > 0 && skinny_metadata) {
				skinny_metadata = false;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				key.offset = num_bytes;
				btrfs_release_path(path);
				ret = btrfs_search_slot(trans, extent_root,
							&key, path, -1, 1);
			}

			if (ret) {
				btrfs_err(info, "umm, got %d back from search, was looking for %llu",
					ret, bytenr);
				if (ret > 0)
					btrfs_print_leaf(extent_root,
							 path->nodes[0]);
			}
			if (ret < 0) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
			extent_slot = path->slots[0];
		}
	} else if (WARN_ON(ret == -ENOENT)) {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		btrfs_err(info,
			"unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
			bytenr, parent, root_objectid, owner_objectid,
			owner_offset);
	} else {
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, extent_root, path,
					     owner_objectid, 0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}

		btrfs_release_path(path);
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
			btrfs_err(info, "umm, got %d back from search, was looking for %llu",
				ret, bytenr);
			btrfs_print_leaf(extent_root, path->nodes[0]);
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}

		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
	    key.type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	if (refs < refs_to_drop) {
		btrfs_err(info, "trying to drop %d refs but we only have %Lu "
			  "for bytenr %Lu\n", refs_to_drop, refs, bytenr);
		ret = -EINVAL;
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
	}
	refs -= refs_to_drop;

	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
						    iref, refs_to_drop,
						    is_data);
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
		}
		add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
				 root_objectid);
	} else {
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(root, path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		if (ret) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}
		btrfs_release_path(path);

		if (is_data) {
			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
		}

		ret = update_block_group(root, bytenr, num_bytes, 0);
		if (ret) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * when we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well.  This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	/* there are still entries for this ref, we can't drop it */
	if (ref->bytenr == bytenr)
		goto out;

	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
		btrfs_free_delayed_extent_op(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock.  If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries.  Go
	 * ahead and process it.
	 */
	head->node.in_tree = 0;
	rb_erase(&head->node.rb_node, &delayed_refs->root);

	delayed_refs->num_entries--;

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (list_empty(&head->cluster))
		delayed_refs->num_heads_ready--;

	list_del_init(&head->cluster);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref(&head->node);
	return ret;
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	struct btrfs_block_group_cache *cache = NULL;
	int pin = 1;
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
					buf->start, buf->len,
					parent, root->root_key.objectid,
					btrfs_header_level(buf),
					BTRFS_DROP_DELAYED_REF, NULL, 0);
		BUG_ON(ret); /* -ENOMEM */
	}

	if (!last_ref)
		return;

	cache = btrfs_lookup_block_group(root->fs_info, buf->start);

	if (btrfs_header_generation(buf) == trans->transid) {
		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, root, buf->start);
			if (!ret)
				goto out;
		}

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(root, cache, buf->start, buf->len, 1);
			goto out;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
		trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
		pin = 0;
	}
out:
	if (pin)
		add_pinned_bytes(root->fs_info, buf->len,
				 btrfs_header_level(buf),
				 root->root_key.objectid);

	/*
	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
	 * anymore.
	 */
	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
	btrfs_put_block_group(cache);
}
/* Can return -ENOMEM */
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
                      u64 owner, u64 offset, int for_cow)
{
        int ret;
        struct btrfs_fs_info *fs_info = root->fs_info;

        add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);

        /*
         * tree log blocks never actually go into the extent allocation
         * tree, just update pinning info and exit early.
         */
        if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
                WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
                /* unlocks the pinned mutex */
                btrfs_pin_extent(root, bytenr, num_bytes, 1);
                ret = 0;
        } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
                                        num_bytes,
                                        parent, root_objectid, (int)owner,
                                        BTRFS_DROP_DELAYED_REF, NULL, for_cow);
        } else {
                ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
                                                num_bytes,
                                                parent, root_objectid, owner,
                                                offset, BTRFS_DROP_DELAYED_REF,
                                                NULL, for_cow);
        }
        return ret;
}
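/* align the start of an allocation to the filesystem stripe size */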
static u64 stripe_align(struct btrfs_root *root,
                        struct btrfs_block_group_cache *cache,
                        u64 val, u64 num_bytes)
{
        u64 ret = ALIGN(val, root->stripesize);
        return ret;
}
/*
 * when we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once. So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes. Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
static noinline void
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
                                u64 num_bytes)
{
        struct btrfs_caching_control *caching_ctl;

        caching_ctl = get_caching_control(cache);
        if (!caching_ctl)
                return;

        wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
                   (cache->free_space_ctl->free_space >= num_bytes));

        put_caching_control(caching_ctl);
}
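/*
 * block until the block group is fully cached; returns -EIO if the
 * caching kthread hit an error.
 */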
static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = get_caching_control(cache);
        if (!caching_ctl)
                return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

        wait_event(caching_ctl->wait, block_group_cache_done(cache));
        if (cache->cached == BTRFS_CACHE_ERROR)
                ret = -EIO;
        put_caching_control(caching_ctl);
        return ret;
}
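/* map extended block group flags to the matching BTRFS_RAID_* index */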
int __get_raid_index(u64 flags)
{
        if (flags & BTRFS_BLOCK_GROUP_RAID10)
                return BTRFS_RAID_RAID10;
        else if (flags & BTRFS_BLOCK_GROUP_RAID1)
                return BTRFS_RAID_RAID1;
        else if (flags & BTRFS_BLOCK_GROUP_DUP)
                return BTRFS_RAID_DUP;
        else if (flags & BTRFS_BLOCK_GROUP_RAID0)
                return BTRFS_RAID_RAID0;
        else if (flags & BTRFS_BLOCK_GROUP_RAID5)
                return BTRFS_RAID_RAID5;
        else if (flags & BTRFS_BLOCK_GROUP_RAID6)
                return BTRFS_RAID_RAID6;

        return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

static int get_block_group_index(struct btrfs_block_group_cache *cache)
{
        return __get_raid_index(cache->flags);
}
enum btrfs_loop_type {
        LOOP_CACHING_NOWAIT = 0,
        LOOP_CACHING_WAIT = 1,
        LOOP_ALLOC_CHUNK = 2,
        LOOP_NO_EMPTY_SIZE = 3,
};
/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == start position
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == the size of the hole.
 * Any available blocks before search_start are skipped.
 *
 * If there is no suitable free space, we will record the max size of
 * the free space extent currently available.
 */
static noinline int find_free_extent(struct btrfs_root *orig_root,
                                     u64 num_bytes, u64 empty_size,
                                     u64 hint_byte, struct btrfs_key *ins,
                                     u64 flags)
{
        int ret = 0;
        struct btrfs_root *root = orig_root->fs_info->extent_root;
        struct btrfs_free_cluster *last_ptr = NULL;
        struct btrfs_block_group_cache *block_group = NULL;
        struct btrfs_block_group_cache *used_block_group;
        u64 search_start = 0;
        u64 max_extent_size = 0;
        int empty_cluster = 2 * 1024 * 1024;
        struct btrfs_space_info *space_info;
        int loop = 0;
        int index = __get_raid_index(flags);
        int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
                RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
        bool found_uncached_bg = false;
        bool failed_cluster_refill = false;
        bool failed_alloc = false;
        bool use_cluster = true;
        bool have_caching_bg = false;

        WARN_ON(num_bytes < root->sectorsize);
        btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
        ins->objectid = 0;
        ins->offset = 0;

        trace_find_free_extent(orig_root, num_bytes, empty_size, flags);

        space_info = __find_space_info(root->fs_info, flags);
        if (!space_info) {
                btrfs_err(root->fs_info, "No space info for %llu", flags);
                return -ENOSPC;
        }
        /*
         * If the space info is for both data and metadata it means we have a
         * small filesystem and we can't use the clustering stuff.
         */
        if (btrfs_mixed_space_info(space_info))
                use_cluster = false;

        if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
                last_ptr = &root->fs_info->meta_alloc_cluster;
                if (!btrfs_test_opt(root, SSD))
                        empty_cluster = 64 * 1024;
        }

        if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
            btrfs_test_opt(root, SSD)) {
                last_ptr = &root->fs_info->data_alloc_cluster;
        }

        if (last_ptr) {
                spin_lock(&last_ptr->lock);
                if (last_ptr->block_group)
                        hint_byte = last_ptr->window_start;
                spin_unlock(&last_ptr->lock);
        }

        search_start = max(search_start, first_logical_byte(root, 0));
        search_start = max(search_start, hint_byte);

        if (!last_ptr)
                empty_cluster = 0;

        if (search_start == hint_byte) {
                block_group = btrfs_lookup_block_group(root->fs_info,
                                                       search_start);
                used_block_group = block_group;
                /*
                 * we don't want to use the block group if it doesn't match our
                 * allocation bits, or if it's not cached.
                 *
                 * However if we are re-searching with an ideal block group
                 * picked out then we don't care that the block group is cached.
                 */
                if (block_group && block_group_bits(block_group, flags) &&
                    block_group->cached != BTRFS_CACHE_NO) {
                        down_read(&space_info->groups_sem);
                        if (list_empty(&block_group->list) ||
                            block_group->ro) {
                                /*
                                 * someone is removing this block group,
                                 * we can't jump into the have_block_group
                                 * target because our list pointers are not
                                 * valid
                                 */
                                btrfs_put_block_group(block_group);
                                up_read(&space_info->groups_sem);
                        } else {
                                index = get_block_group_index(block_group);
                                goto have_block_group;
                        }
                } else if (block_group) {
                        btrfs_put_block_group(block_group);
                }
        }
search:
        have_caching_bg = false;
        down_read(&space_info->groups_sem);
        list_for_each_entry(block_group, &space_info->block_groups[index],
                            list) {
                u64 offset;
                int cached;

                used_block_group = block_group;
                btrfs_get_block_group(block_group);
                search_start = block_group->key.objectid;

                /*
                 * this can happen if we end up cycling through all the
                 * raid types, but we want to make sure we only allocate
                 * for the proper type.
                 */
                if (!block_group_bits(block_group, flags)) {
                        u64 extra = BTRFS_BLOCK_GROUP_DUP |
                                BTRFS_BLOCK_GROUP_RAID1 |
                                BTRFS_BLOCK_GROUP_RAID5 |
                                BTRFS_BLOCK_GROUP_RAID6 |
                                BTRFS_BLOCK_GROUP_RAID10;

                        /*
                         * if they asked for extra copies and this block group
                         * doesn't provide them, bail. This does allow us to
                         * fill raid0 from raid1.
                         */
                        if ((flags & extra) && !(block_group->flags & extra))
                                goto loop;
                }

have_block_group:
                cached = block_group_cache_done(block_group);
                if (unlikely(!cached)) {
                        found_uncached_bg = true;
                        ret = cache_block_group(block_group, 0);
                        BUG_ON(ret < 0);
                        ret = 0;
                }

                if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
                        goto loop;
                if (unlikely(block_group->ro))
                        goto loop;

                /*
                 * Ok we want to try and use the cluster allocator, so
                 * let's look there
                 */
                if (last_ptr) {
                        unsigned long aligned_cluster;
                        /*
                         * the refill lock keeps out other
                         * people trying to start a new cluster
                         */
                        spin_lock(&last_ptr->refill_lock);
                        used_block_group = last_ptr->block_group;
                        if (used_block_group != block_group &&
                            (!used_block_group ||
                             used_block_group->ro ||
                             !block_group_bits(used_block_group, flags))) {
                                used_block_group = block_group;
                                goto refill_cluster;
                        }

                        if (used_block_group != block_group)
                                btrfs_get_block_group(used_block_group);

                        offset = btrfs_alloc_from_cluster(used_block_group,
                                                last_ptr,
                                                num_bytes,
                                                used_block_group->key.objectid,
                                                &max_extent_size);
                        if (offset) {
                                /* we have a block, we're done */
                                spin_unlock(&last_ptr->refill_lock);
                                trace_btrfs_reserve_extent_cluster(root,
                                        block_group, search_start, num_bytes);
                                goto checks;
                        }

                        WARN_ON(last_ptr->block_group != used_block_group);
                        if (used_block_group != block_group) {
                                btrfs_put_block_group(used_block_group);
                                used_block_group = block_group;
                        }
refill_cluster:
                        BUG_ON(used_block_group != block_group);
                        /* If we are on LOOP_NO_EMPTY_SIZE, we can't
                         * set up a new cluster, so let's just skip it
                         * and let the allocator find whatever block
                         * it can find. If we reach this point, we
                         * will have tried the cluster allocator
                         * plenty of times and not have found
                         * anything, so we are likely way too
                         * fragmented for the clustering stuff to find
                         * anything.
                         *
                         * However, if the cluster is taken from the
                         * current block group, release the cluster
                         * first, so that we stand a better chance of
                         * succeeding in the unclustered
                         * allocation. */
                        if (loop >= LOOP_NO_EMPTY_SIZE &&
                            last_ptr->block_group != block_group) {
                                spin_unlock(&last_ptr->refill_lock);
                                goto unclustered_alloc;
                        }

                        /*
                         * this cluster didn't work out, free it and
                         * start over
                         */
                        btrfs_return_cluster_to_free_space(NULL, last_ptr);

                        if (loop >= LOOP_NO_EMPTY_SIZE) {
                                spin_unlock(&last_ptr->refill_lock);
                                goto unclustered_alloc;
                        }

                        aligned_cluster = max_t(unsigned long,
                                                empty_cluster + empty_size,
                                                block_group->full_stripe_len);

                        /* allocate a cluster in this block group */
                        ret = btrfs_find_space_cluster(root, block_group,
                                                       last_ptr, search_start,
                                                       num_bytes,
                                                       aligned_cluster);
                        if (ret == 0) {
                                /*
                                 * now pull our allocation out of this
                                 * cluster
                                 */
                                offset = btrfs_alloc_from_cluster(block_group,
                                                        last_ptr,
                                                        num_bytes,
                                                        search_start,
                                                        &max_extent_size);
                                if (offset) {
                                        /* we found one, proceed */
                                        spin_unlock(&last_ptr->refill_lock);
                                        trace_btrfs_reserve_extent_cluster(root,
                                                block_group, search_start,
                                                num_bytes);
                                        goto checks;
                                }
                        } else if (!cached && loop > LOOP_CACHING_NOWAIT
                                   && !failed_cluster_refill) {
                                spin_unlock(&last_ptr->refill_lock);

                                failed_cluster_refill = true;
                                wait_block_group_cache_progress(block_group,
                                       num_bytes + empty_cluster + empty_size);
                                goto have_block_group;
                        }

                        /*
                         * at this point we either didn't find a cluster
                         * or we weren't able to allocate a block from our
                         * cluster. Free the cluster we've been trying
                         * to use, and go to the next block group
                         */
                        btrfs_return_cluster_to_free_space(NULL, last_ptr);
                        spin_unlock(&last_ptr->refill_lock);
                        goto loop;
                }
unclustered_alloc:
                spin_lock(&block_group->free_space_ctl->tree_lock);
                if (cached &&
                    block_group->free_space_ctl->free_space <
                    num_bytes + empty_cluster + empty_size) {
                        if (block_group->free_space_ctl->free_space >
                            max_extent_size)
                                max_extent_size =
                                        block_group->free_space_ctl->free_space;
                        spin_unlock(&block_group->free_space_ctl->tree_lock);
                        goto loop;
                }
                spin_unlock(&block_group->free_space_ctl->tree_lock);

                offset = btrfs_find_space_for_alloc(block_group, search_start,
                                                    num_bytes, empty_size,
                                                    &max_extent_size);
                /*
                 * If we didn't find a chunk, and we haven't failed on this
                 * block group before, and this block group is in the middle of
                 * caching and we are ok with waiting, then go ahead and wait
                 * for progress to be made, and set failed_alloc to true.
                 *
                 * If failed_alloc is true then we've already waited on this
                 * block group once and should move on to the next block group.
                 */
                if (!offset && !failed_alloc && !cached &&
                    loop > LOOP_CACHING_NOWAIT) {
                        wait_block_group_cache_progress(block_group,
                                                num_bytes + empty_size);
                        failed_alloc = true;
                        goto have_block_group;
                } else if (!offset) {
                        if (!cached)
                                have_caching_bg = true;
                        goto loop;
                }
checks:
                search_start = stripe_align(root, used_block_group,
                                            offset, num_bytes);

                /* move on to the next group */
                if (search_start + num_bytes >
                    used_block_group->key.objectid + used_block_group->key.offset) {
                        btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }

                if (offset < search_start)
                        btrfs_add_free_space(used_block_group, offset,
                                             search_start - offset);
                BUG_ON(offset > search_start);

                ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
                                                  alloc_type);
                if (ret == -EAGAIN) {
                        btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }

                /* we are all good, let's return */
                ins->objectid = search_start;
                ins->offset = num_bytes;

                trace_btrfs_reserve_extent(orig_root, block_group,
                                           search_start, num_bytes);
                if (used_block_group != block_group)
                        btrfs_put_block_group(used_block_group);
                btrfs_put_block_group(block_group);
                break;
loop:
                failed_cluster_refill = false;
                failed_alloc = false;
                BUG_ON(index != get_block_group_index(block_group));
                if (used_block_group != block_group)
                        btrfs_put_block_group(used_block_group);
                btrfs_put_block_group(block_group);
        }
        up_read(&space_info->groups_sem);

        if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
                goto search;

        if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
                goto search;

        /*
         * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
         *                      caching kthreads as we move along
         * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
         * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
         * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
         *                      again
         */
        if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
                index = 0;
                loop++;
                if (loop == LOOP_ALLOC_CHUNK) {
                        struct btrfs_trans_handle *trans;

                        trans = btrfs_join_transaction(root);
                        if (IS_ERR(trans)) {
                                ret = PTR_ERR(trans);
                                goto out;
                        }

                        ret = do_chunk_alloc(trans, root, flags,
                                             CHUNK_ALLOC_FORCE);
                        /*
                         * Do not bail out on ENOSPC since we
                         * can do more things.
                         */
                        if (ret < 0 && ret != -ENOSPC)
                                btrfs_abort_transaction(trans,
                                                        root, ret);
                        else
                                ret = 0;
                        btrfs_end_transaction(trans, root);
                        if (ret)
                                goto out;
                }

                if (loop == LOOP_NO_EMPTY_SIZE) {
                        empty_size = 0;
                        empty_cluster = 0;
                }

                goto search;
        } else if (!ins->objectid) {
                ret = -ENOSPC;
        } else if (ins->objectid) {
                ret = 0;
        }
out:
        if (ret == -ENOSPC)
                ins->offset = max_extent_size;
        return ret;
}
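/*
 * dump the usage counters of a space_info, and optionally of each of its
 * block groups, to help debug ENOSPC problems.
 */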
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups)
{
        struct btrfs_block_group_cache *cache;
        int index = 0;

        spin_lock(&info->lock);
        printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
               info->flags,
               info->total_bytes - info->bytes_used - info->bytes_pinned -
               info->bytes_reserved - info->bytes_readonly,
               (info->full) ? "" : "not ");
        printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
               "reserved=%llu, may_use=%llu, readonly=%llu\n",
               info->total_bytes, info->bytes_used, info->bytes_pinned,
               info->bytes_reserved, info->bytes_may_use,
               info->bytes_readonly);
        spin_unlock(&info->lock);

        if (!dump_block_groups)
                return;

        down_read(&info->groups_sem);
again:
        list_for_each_entry(cache, &info->block_groups[index], list) {
                spin_lock(&cache->lock);
                printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
                       cache->key.objectid, cache->key.offset,
                       btrfs_block_group_used(&cache->item), cache->pinned,
                       cache->reserved, cache->ro ? "[readonly]" : "");
                btrfs_dump_free_space(cache, bytes);
                spin_unlock(&cache->lock);
        }
        if (++index < BTRFS_NR_RAID_TYPES)
                goto again;
        up_read(&info->groups_sem);
}
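/*
 * reserve an extent of at least min_alloc_size bytes. On ENOSPC, retry
 * with progressively smaller sizes, bounded below by min_alloc_size,
 * before giving up.
 */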
int btrfs_reserve_extent(struct btrfs_root *root,
                         u64 num_bytes, u64 min_alloc_size,
                         u64 empty_size, u64 hint_byte,
                         struct btrfs_key *ins, int is_data)
{
        bool final_tried = false;
        u64 flags;
        int ret;

        flags = btrfs_get_alloc_profile(root, is_data);
again:
        WARN_ON(num_bytes < root->sectorsize);
        ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
                               flags);

        if (ret == -ENOSPC) {
                if (!final_tried && ins->offset) {
                        num_bytes = min(num_bytes >> 1, ins->offset);
                        num_bytes = round_down(num_bytes, root->sectorsize);
                        num_bytes = max(num_bytes, min_alloc_size);
                        if (num_bytes == min_alloc_size)
                                final_tried = true;
                        goto again;
                } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
                        struct btrfs_space_info *sinfo;

                        sinfo = __find_space_info(root->fs_info, flags);
                        btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
                                  flags, num_bytes);
                        if (sinfo)
                                dump_space_info(sinfo, num_bytes, 1);
                }
        }

        return ret;
}
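/*
 * give a reserved extent back to its block group, optionally discarding
 * it first and either pinning it or returning it to free space.
 */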
static int __btrfs_free_reserved_extent(struct btrfs_root *root,
                                        u64 start, u64 len, int pin)
{
        struct btrfs_block_group_cache *cache;
        int ret = 0;

        cache = btrfs_lookup_block_group(root->fs_info, start);
        if (!cache) {
                btrfs_err(root->fs_info, "Unable to find block group for %llu",
                          start);
                return -ENOSPC;
        }

        if (btrfs_test_opt(root, DISCARD))
                ret = btrfs_discard_extent(root, start, len, NULL);

        if (pin)
                pin_down_extent(root, cache, start, len, 1);
        else {
                btrfs_add_free_space(cache, start, len);
                btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
        }
        btrfs_put_block_group(cache);

        trace_btrfs_reserved_extent_free(root, start, len);

        return ret;
}

int btrfs_free_reserved_extent(struct btrfs_root *root,
                               u64 start, u64 len)
{
        return __btrfs_free_reserved_extent(root, start, len, 0);
}

int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
                                       u64 start, u64 len)
{
        return __btrfs_free_reserved_extent(root, start, len, 1);
}
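/*
 * insert the extent item and its inline data backref for a newly
 * allocated data extent into the extent tree.
 */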
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod)
{
        int ret;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_extent_item *extent_item;
        struct btrfs_extent_inline_ref *iref;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        int type;
        u32 size;

        if (parent > 0)
                type = BTRFS_SHARED_DATA_REF_KEY;
        else
                type = BTRFS_EXTENT_DATA_REF_KEY;

        size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->leave_spinning = 1;
        ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
                                      ins, size);
        if (ret) {
                btrfs_free_path(path);
                return ret;
        }

        leaf = path->nodes[0];
        extent_item = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, extent_item, ref_mod);
        btrfs_set_extent_generation(leaf, extent_item, trans->transid);
        btrfs_set_extent_flags(leaf, extent_item,
                               flags | BTRFS_EXTENT_FLAG_DATA);

        iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
        btrfs_set_extent_inline_ref_type(leaf, iref, type);
        if (parent > 0) {
                struct btrfs_shared_data_ref *ref;
                ref = (struct btrfs_shared_data_ref *)(iref + 1);
                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
                btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
        } else {
                struct btrfs_extent_data_ref *ref;
                ref = (struct btrfs_extent_data_ref *)(&iref->offset);
                btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
                btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
        }

        btrfs_mark_buffer_dirty(path->nodes[0]);
        btrfs_free_path(path);

        ret = update_block_group(root, ins->objectid, ins->offset, 1);
        if (ret) { /* -ENOENT, logic error */
                btrfs_err(fs_info, "update block group failed for %llu %llu",
                          ins->objectid, ins->offset);
                BUG();
        }
        trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
        return ret;
}
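/*
 * insert the extent item for a newly allocated tree block, using the
 * compact layout when the skinny metadata incompat feature is enabled.
 */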
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins)
{
        int ret;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_extent_item *extent_item;
        struct btrfs_tree_block_info *block_info;
        struct btrfs_extent_inline_ref *iref;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        u32 size = sizeof(*extent_item) + sizeof(*iref);
        bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
                                                 SKINNY_METADATA);

        if (!skinny_metadata)
                size += sizeof(*block_info);

        path = btrfs_alloc_path();
        if (!path) {
                btrfs_free_and_pin_reserved_extent(root, ins->objectid,
                                                   root->leafsize);
                return -ENOMEM;
        }

        path->leave_spinning = 1;
        ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
                                      ins, size);
        if (ret) {
                btrfs_free_and_pin_reserved_extent(root, ins->objectid,
                                                   root->leafsize);
                btrfs_free_path(path);
                return ret;
        }

        leaf = path->nodes[0];
        extent_item = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, extent_item, 1);
        btrfs_set_extent_generation(leaf, extent_item, trans->transid);
        btrfs_set_extent_flags(leaf, extent_item,
                               flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);

        if (skinny_metadata) {
                iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
        } else {
                block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
                btrfs_set_tree_block_key(leaf, block_info, key);
                btrfs_set_tree_block_level(leaf, block_info, level);
                iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
        }

        if (parent > 0) {
                BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
                btrfs_set_extent_inline_ref_type(leaf, iref,
                                                 BTRFS_SHARED_BLOCK_REF_KEY);
                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
        } else {
                btrfs_set_extent_inline_ref_type(leaf, iref,
                                                 BTRFS_TREE_BLOCK_REF_KEY);
                btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
        }

        btrfs_mark_buffer_dirty(leaf);
        btrfs_free_path(path);

        ret = update_block_group(root, ins->objectid, root->leafsize, 1);
        if (ret) { /* -ENOENT, logic error */
                btrfs_err(fs_info, "update block group failed for %llu %llu",
                          ins->objectid, ins->offset);
                BUG();
        }

        trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->leafsize);
        return ret;
}
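/*
 * queue a delayed ref that will insert the extent item for a reserved
 * data extent once the delayed refs are run.
 */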
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 root_objectid, u64 owner,
                                     u64 offset, struct btrfs_key *ins)
{
        int ret;

        BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);

        ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
                                         ins->offset, 0,
                                         root_objectid, owner, offset,
                                         BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
        return ret;
}
/*
 * this is used by the tree logging recovery code. It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   u64 root_objectid, u64 owner, u64 offset,
                                   struct btrfs_key *ins)
{
        int ret;
        struct btrfs_block_group_cache *block_group;

        /*
         * Mixed block groups will exclude before processing the log so we only
         * need to do the exclude dance if this fs isn't mixed.
         */
        if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
                ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
                if (ret)
                        return ret;
        }

        block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
        if (!block_group)
                return -EINVAL;

        ret = btrfs_update_reserved_bytes(block_group, ins->offset,
                                          RESERVE_ALLOC_NO_ACCOUNT);
        BUG_ON(ret); /* logic error */
        ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
                                         0, owner, offset, ins, 1);
        btrfs_put_block_group(block_group);
        return ret;
}
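/*
 * set up a freshly allocated tree block: stamp the transid, lock the
 * buffer and mark it dirty in the appropriate extent io tree.
 */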
static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      u64 bytenr, u32 blocksize, int level)
{
        struct extent_buffer *buf;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return ERR_PTR(-ENOMEM);
        btrfs_set_header_generation(buf, trans->transid);
        btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
        btrfs_tree_lock(buf);
        clean_tree_block(trans, root, buf);
        clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);

        btrfs_set_lock_blocking(buf);
        btrfs_set_buffer_uptodate(buf);

        if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
                /*
                 * we allow two log transactions at a time, use different
                 * EXTENT bit to differentiate dirty pages.
                 */
                if (root->log_transid % 2 == 0)
                        set_extent_dirty(&root->dirty_log_pages, buf->start,
                                         buf->start + buf->len - 1, GFP_NOFS);
                else
                        set_extent_new(&root->dirty_log_pages, buf->start,
                                       buf->start + buf->len - 1, GFP_NOFS);
        } else {
                set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
                                 buf->start + buf->len - 1, GFP_NOFS);
        }
        trans->blocks_used++;
        /* this returns a buffer locked for blocking */
        return buf;
}
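/*
 * pick the block reserve to charge for a new tree block, falling back to
 * a fresh reservation and finally to the global reserve.
 */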
static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
              struct btrfs_root *root, u32 blocksize)
{
        struct btrfs_block_rsv *block_rsv;
        struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
        int ret;
        bool global_updated = false;

        block_rsv = get_block_rsv(trans, root);

        if (unlikely(block_rsv->size == 0))
                goto try_reserve;
again:
        ret = block_rsv_use_bytes(block_rsv, blocksize);
        if (!ret)
                return block_rsv;

        if (block_rsv->failfast)
                return ERR_PTR(ret);

        if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
                global_updated = true;
                update_global_block_rsv(root->fs_info);
                goto again;
        }

        if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
                static DEFINE_RATELIMIT_STATE(_rs,
                                DEFAULT_RATELIMIT_INTERVAL * 10,
                                /*DEFAULT_RATELIMIT_BURST*/ 1);
                if (__ratelimit(&_rs))
                        WARN(1, KERN_DEBUG
                                "btrfs: block rsv returned %d\n", ret);
        }
try_reserve:
        ret = reserve_metadata_bytes(root, block_rsv, blocksize,
                                     BTRFS_RESERVE_NO_FLUSH);
        if (!ret)
                return block_rsv;
        /*
         * If we couldn't reserve metadata bytes try and use some from
         * the global reserve if its space type is the same as the global
         * reservation.
         */
        if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
            block_rsv->space_info == global_rsv->space_info) {
                ret = block_rsv_use_bytes(global_rsv, blocksize);
                if (!ret)
                        return global_rsv;
        }
        return ERR_PTR(ret);
}
static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
                            struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
        block_rsv_add_bytes(block_rsv, blocksize, 0);
        block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
}
/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns the tree buffer or an ERR_PTR on failure.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root, u32 blocksize,
                                        u64 parent, u64 root_objectid,
                                        struct btrfs_disk_key *key, int level,
                                        u64 hint, u64 empty_size)
{
        struct btrfs_key ins;
        struct btrfs_block_rsv *block_rsv;
        struct extent_buffer *buf;
        u64 flags = 0;
        int ret;
        bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
                                                 SKINNY_METADATA);

        block_rsv = use_block_rsv(trans, root, blocksize);
        if (IS_ERR(block_rsv))
                return ERR_CAST(block_rsv);

        ret = btrfs_reserve_extent(root, blocksize, blocksize,
                                   empty_size, hint, &ins, 0);
        if (ret) {
                unuse_block_rsv(root->fs_info, block_rsv, blocksize);
                return ERR_PTR(ret);
        }

        buf = btrfs_init_new_buffer(trans, root, ins.objectid,
                                    blocksize, level);
        BUG_ON(IS_ERR(buf)); /* -ENOMEM */

        if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
                if (parent == 0)
                        parent = ins.objectid;
                flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
        } else
                BUG_ON(parent > 0);

        if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
                struct btrfs_delayed_extent_op *extent_op;
                extent_op = btrfs_alloc_delayed_extent_op();
                BUG_ON(!extent_op); /* -ENOMEM */
                if (key)
                        memcpy(&extent_op->key, key, sizeof(extent_op->key));
                else
                        memset(&extent_op->key, 0, sizeof(extent_op->key));
                extent_op->flags_to_set = flags;
                if (skinny_metadata)
                        extent_op->update_key = 0;
                else
                        extent_op->update_key = 1;
                extent_op->update_flags = 1;
                extent_op->is_data = 0;
                extent_op->level = level;

                ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
                                        ins.objectid,
                                        ins.offset, parent, root_objectid,
                                        level, BTRFS_ADD_DELAYED_EXTENT,
                                        extent_op, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        return buf;
}
struct walk_control {
        u64 refs[BTRFS_MAX_LEVEL];
        u64 flags[BTRFS_MAX_LEVEL];
        struct btrfs_key update_progress;
        int stage;
        int level;
        int shared_level;
        int update_ref;
        int keep_locks;
        int reada_slot;
        int reada_count;
        int for_reloc;
};

#define DROP_REFERENCE  1
#define UPDATE_BACKREF  2
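/*
 * read ahead the tree blocks referenced by the current node, growing or
 * shrinking the readahead window based on how the walk is progressing.
 */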
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct walk_control *wc,
                                     struct btrfs_path *path)
{
        u64 bytenr;
        u64 generation;
        u64 refs;
        u64 flags;
        u32 nritems;
        u32 blocksize;
        struct btrfs_key key;
        struct extent_buffer *eb;
        int ret;
        int slot;
        int nread = 0;

        if (path->slots[wc->level] < wc->reada_slot) {
                wc->reada_count = wc->reada_count * 2 / 3;
                wc->reada_count = max(wc->reada_count, 2);
        } else {
                wc->reada_count = wc->reada_count * 3 / 2;
                wc->reada_count = min_t(int, wc->reada_count,
                                        BTRFS_NODEPTRS_PER_BLOCK(root));
        }

        eb = path->nodes[wc->level];
        nritems = btrfs_header_nritems(eb);
        blocksize = btrfs_level_size(root, wc->level - 1);

        for (slot = path->slots[wc->level]; slot < nritems; slot++) {
                if (nread >= wc->reada_count)
                        break;

                cond_resched();
                bytenr = btrfs_node_blockptr(eb, slot);
                generation = btrfs_node_ptr_generation(eb, slot);

                if (slot == path->slots[wc->level])
                        goto reada;

                if (wc->stage == UPDATE_BACKREF &&
                    generation <= root->root_key.offset)
                        continue;

                /* We don't lock the tree block, it's OK to be racy here */
                ret = btrfs_lookup_extent_info(trans, root, bytenr,
                                               wc->level - 1, 1, &refs,
                                               &flags);
                /* We don't care about errors in readahead. */
                if (ret < 0)
                        continue;
                BUG_ON(refs == 0);

                if (wc->stage == DROP_REFERENCE) {
                        if (refs == 1)
                                goto reada;

                        if (wc->level == 1 &&
                            (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
                                continue;
                        if (!wc->update_ref ||
                            generation <= root->root_key.offset)
                                continue;
                        btrfs_node_key_to_cpu(eb, &key, slot);
                        ret = btrfs_comp_cpu_keys(&key,
                                                  &wc->update_progress);
                        if (ret < 0)
                                continue;
                } else {
                        if (wc->level == 1 &&
                            (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
                                continue;
                }
reada:
                ret = readahead_tree_block(root, bytenr, blocksize,
                                           generation);
                if (ret)
                        break;
                nread++;
        }
        wc->reada_slot = slot;
}
/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct btrfs_path *path,
                                   struct walk_control *wc, int lookup_info)
{
        int level = wc->level;
        struct extent_buffer *eb = path->nodes[level];
        u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
        int ret;

        if (wc->stage == UPDATE_BACKREF &&
            btrfs_header_owner(eb) != root->root_key.objectid)
                return 1;

        /*
         * when reference count of tree block is 1, it won't increase
         * again. once full backref flag is set, we never clear it.
         */
        if (lookup_info &&
            ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
             (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
                BUG_ON(!path->locks[level]);
                ret = btrfs_lookup_extent_info(trans, root,
                                               eb->start, level, 1,
                                               &wc->refs[level],
                                               &wc->flags[level]);
                BUG_ON(ret == -ENOMEM);
                if (ret)
                        return ret;
                BUG_ON(wc->refs[level] == 0);
        }

        if (wc->stage == DROP_REFERENCE) {
                if (wc->refs[level] > 1)
                        return 1;

                if (path->locks[level] && !wc->keep_locks) {
                        btrfs_tree_unlock_rw(eb, path->locks[level]);
                        path->locks[level] = 0;
                }
                return 0;
        }

        /* wc->stage == UPDATE_BACKREF */
        if (!(wc->flags[level] & flag)) {
                BUG_ON(!path->locks[level]);
                ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
                BUG_ON(ret); /* -ENOMEM */
                ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
                BUG_ON(ret); /* -ENOMEM */
                ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
                                                  eb->len, flag,
                                                  btrfs_header_level(eb), 0);
                BUG_ON(ret); /* -ENOMEM */
                wc->flags[level] |= flag;
        }

        /*
         * the block is shared by multiple trees, so it's not good to
         * keep the tree lock
         */
        if (path->locks[level] && level > 0) {
                btrfs_tree_unlock_rw(eb, path->locks[level]);
                path->locks[level] = 0;
        }
        return 0;
}
/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to. if the block
 * is shared and we need update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update back refs, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct walk_control *wc, int *lookup_info)
{
        u64 bytenr;
        u64 generation;
        u64 parent;
        u32 blocksize;
        struct btrfs_key key;
        struct extent_buffer *next;
        int level = wc->level;
        int reada = 0;
        int ret = 0;

        generation = btrfs_node_ptr_generation(path->nodes[level],
                                               path->slots[level]);
        /*
         * if the lower level block was created before the snapshot
         * was created, we know there is no need to update back refs
         * for the subtree
         */
        if (wc->stage == UPDATE_BACKREF &&
            generation <= root->root_key.offset) {
                *lookup_info = 1;
                return 1;
        }

        bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
        blocksize = btrfs_level_size(root, level - 1);

        next = btrfs_find_tree_block(root, bytenr, blocksize);
        if (!next) {
                next = btrfs_find_create_tree_block(root, bytenr, blocksize);
                if (!next)
                        return -ENOMEM;
                btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
                                               level - 1);
                reada = 1;
        }
        btrfs_tree_lock(next);
        btrfs_set_lock_blocking(next);

        ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
                                       &wc->refs[level - 1],
                                       &wc->flags[level - 1]);
        if (ret < 0) {
                btrfs_tree_unlock(next);
                return ret;
        }

        if (unlikely(wc->refs[level - 1] == 0)) {
                btrfs_err(root->fs_info, "Missing references.");
                BUG();
        }
        *lookup_info = 0;

        if (wc->stage == DROP_REFERENCE) {
                if (wc->refs[level - 1] > 1) {
                        if (level == 1 &&
                            (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
                                goto skip;

                        if (!wc->update_ref ||
                            generation <= root->root_key.offset)
                                goto skip;

                        btrfs_node_key_to_cpu(path->nodes[level], &key,
                                              path->slots[level]);
                        ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
                        if (ret < 0)
                                goto skip;

                        wc->stage = UPDATE_BACKREF;
                        wc->shared_level = level - 1;
                }
        } else {
                if (level == 1 &&
                    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
                        goto skip;
        }

        if (!btrfs_buffer_uptodate(next, generation, 0)) {
                btrfs_tree_unlock(next);
                free_extent_buffer(next);
                next = NULL;
                *lookup_info = 1;
        }

        if (!next) {
                if (reada && level == 1)
                        reada_walk_down(trans, root, wc, path);
                next = read_tree_block(root, bytenr, blocksize, generation);
                if (!next || !extent_buffer_uptodate(next)) {
                        free_extent_buffer(next);
                        return -EIO;
                }
                btrfs_tree_lock(next);
                btrfs_set_lock_blocking(next);
        }

        level--;
        BUG_ON(level != btrfs_header_level(next));
        path->nodes[level] = next;
        path->slots[level] = 0;
        path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
        wc->level = level;
        if (wc->level == 1)
                wc->reada_slot = 0;
        return 0;
skip:
        wc->refs[level - 1] = 0;
        wc->flags[level - 1] = 0;
        if (wc->stage == DROP_REFERENCE) {
                if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
                        parent = path->nodes[level]->start;
                } else {
                        BUG_ON(root->root_key.objectid !=
                               btrfs_header_owner(path->nodes[level]));
                        parent = 0;
                }

                ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
                                root->root_key.objectid, level - 1, 0, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        btrfs_tree_unlock(next);
        free_extent_buffer(next);
        *lookup_info = 1;
        return 1;
}
/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct walk_control *wc)
{
        int ret;
        int level = wc->level;
        struct extent_buffer *eb = path->nodes[level];
        u64 parent = 0;

        if (wc->stage == UPDATE_BACKREF) {
                BUG_ON(wc->shared_level < level);
                if (level < wc->shared_level)
                        goto out;

                ret = find_next_key(path, level + 1, &wc->update_progress);
                if (ret > 0)
                        wc->update_ref = 0;

                wc->stage = DROP_REFERENCE;
                wc->shared_level = -1;
                path->slots[level] = 0;

                /*
                 * check reference count again if the block isn't locked.
                 * we should start walking down the tree again if reference
                 * count is one.
                 */
                if (!path->locks[level]) {
                        BUG_ON(level == 0);
                        btrfs_tree_lock(eb);
                        btrfs_set_lock_blocking(eb);
                        path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

                        ret = btrfs_lookup_extent_info(trans, root,
                                                       eb->start, level, 1,
                                                       &wc->refs[level],
                                                       &wc->flags[level]);
                        if (ret < 0) {
                                btrfs_tree_unlock_rw(eb, path->locks[level]);
                                path->locks[level] = 0;
                                return ret;
                        }
                        BUG_ON(wc->refs[level] == 0);
                        if (wc->refs[level] == 1) {
                                btrfs_tree_unlock_rw(eb, path->locks[level]);
                                path->locks[level] = 0;
                                return 1;
                        }
                }
        }

        /* wc->stage == DROP_REFERENCE */
        BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

        if (wc->refs[level] == 1) {
                if (level == 0) {
                        if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
                                ret = btrfs_dec_ref(trans, root, eb, 1,
                                                    wc->for_reloc);
                        else
                                ret = btrfs_dec_ref(trans, root, eb, 0,
                                                    wc->for_reloc);
                        BUG_ON(ret); /* -ENOMEM */
                }
                /* make block locked assertion in clean_tree_block happy */
                if (!path->locks[level] &&
                    btrfs_header_generation(eb) == trans->transid) {
                        btrfs_tree_lock(eb);
                        btrfs_set_lock_blocking(eb);
                        path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
                }
                clean_tree_block(trans, root, eb);
        }

        if (eb == root->node) {
                if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
                        parent = eb->start;
                else
                        BUG_ON(root->root_key.objectid !=
                               btrfs_header_owner(eb));
        } else {
                if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
                        parent = path->nodes[level + 1]->start;
                else
                        BUG_ON(root->root_key.objectid !=
                               btrfs_header_owner(path->nodes[level + 1]));
        }

        btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
        wc->refs[level] = 0;
        wc->flags[level] = 0;
        return 0;
}
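/*
 * walk down the tree from wc->level, processing each block until we
 * reach a leaf or a block that should not be descended into.
 */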
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct btrfs_path *path,
                                   struct walk_control *wc)
{
        int level = wc->level;
        int lookup_info = 1;
        int ret;

        while (level >= 0) {
                ret = walk_down_proc(trans, root, path, wc, lookup_info);
                if (ret > 0)
                        break;

                if (level == 0)
                        break;

                if (path->slots[level] >=
                    btrfs_header_nritems(path->nodes[level]))
                        break;

                ret = do_walk_down(trans, root, path, wc, &lookup_info);
                if (ret > 0) {
                        path->slots[level]++;
                        continue;
                } else if (ret < 0)
                        return ret;
                level = wc->level;
        }
        return 0;
}
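/*
 * walk back up the tree, releasing each fully processed block, until a
 * node with unvisited slots or max_level is reached.
 */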
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct walk_control *wc, int max_level)
{
        int level = wc->level;
        int ret;

        path->slots[level] = btrfs_header_nritems(path->nodes[level]);
        while (level < max_level && path->nodes[level]) {
                wc->level = level;
                if (path->slots[level] + 1 <
                    btrfs_header_nritems(path->nodes[level])) {
                        path->slots[level]++;
                        return 0;
                } else {
                        ret = walk_up_proc(trans, root, path, wc);
                        if (ret > 0)
                                return 0;

                        if (path->locks[level]) {
                                btrfs_tree_unlock_rw(path->nodes[level],
                                                     path->locks[level]);
                                path->locks[level] = 0;
                        }
                        free_extent_buffer(path->nodes[level]);
                        path->nodes[level] = NULL;
                        level++;
                }
        }
        return 1;
}
/*
 * drop a subvolume tree.
 *
 * this function traverses the tree and frees any blocks that are
 * only referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 *
 * If called with for_reloc == 0, may exit early with -EAGAIN
 */
int btrfs_drop_snapshot(struct btrfs_root *root,
                        struct btrfs_block_rsv *block_rsv, int update_ref,
                        int for_reloc)
{
        struct btrfs_path *path;
        struct btrfs_trans_handle *trans;
        struct btrfs_root *tree_root = root->fs_info->tree_root;
        struct btrfs_root_item *root_item = &root->root_item;
        struct walk_control *wc;
        struct btrfs_key key;
        int err = 0;
        int ret;
        int level;
        bool root_dropped = false;

        path = btrfs_alloc_path();
        if (!path) {
                err = -ENOMEM;
                goto out;
        }

        wc = kzalloc(sizeof(*wc), GFP_NOFS);
        if (!wc) {
                btrfs_free_path(path);
                err = -ENOMEM;
                goto out;
        }

        trans = btrfs_start_transaction(tree_root, 0);
        if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
                goto out_free;
        }

        if (block_rsv)
                trans->block_rsv = block_rsv;

        if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
                level = btrfs_header_level(root->node);
                path->nodes[level] = btrfs_lock_root_node(root);
                btrfs_set_lock_blocking(path->nodes[level]);
                path->slots[level] = 0;
                path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
                memset(&wc->update_progress, 0,
                       sizeof(wc->update_progress));
        } else {
                btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
                memcpy(&wc->update_progress, &key,
                       sizeof(wc->update_progress));

                level = root_item->drop_level;
                BUG_ON(level == 0);
                path->lowest_level = level;
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                path->lowest_level = 0;
                if (ret < 0) {
                        err = ret;
                        goto out_end_trans;
                }
                WARN_ON(ret > 0);

                /*
                 * unlock our path, this is safe because only this
                 * function is allowed to delete this snapshot
                 */
                btrfs_unlock_up_safe(path, 0);

                level = btrfs_header_level(root->node);
                while (1) {
                        btrfs_tree_lock(path->nodes[level]);
                        btrfs_set_lock_blocking(path->nodes[level]);
                        path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

                        ret = btrfs_lookup_extent_info(trans, root,
                                                path->nodes[level]->start,
                                                level, 1, &wc->refs[level],
                                                &wc->flags[level]);
                        if (ret < 0) {
                                err = ret;
                                goto out_end_trans;
                        }
                        BUG_ON(wc->refs[level] == 0);

                        if (level == root_item->drop_level)
                                break;

                        btrfs_tree_unlock(path->nodes[level]);
                        path->locks[level] = 0;
                        WARN_ON(wc->refs[level] != 1);
                        level--;
                }
        }

        wc->level = level;
        wc->shared_level = -1;
        wc->stage = DROP_REFERENCE;
        wc->update_ref = update_ref;
        wc->keep_locks = 0;
        wc->for_reloc = for_reloc;
        wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

        while (1) {
                ret = walk_down_tree(trans, root, path, wc);
                if (ret < 0) {
                        err = ret;
                        break;
                }

                ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
                if (ret < 0) {
                        err = ret;
                        break;
                }

                if (ret > 0) {
                        BUG_ON(wc->stage != DROP_REFERENCE);
                        break;
                }

                if (wc->stage == DROP_REFERENCE) {
                        level = wc->level;
                        btrfs_node_key(path->nodes[level],
                                       &root_item->drop_progress,
                                       path->slots[level]);
                        root_item->drop_level = level;
                }

                BUG_ON(wc->level == 0);
                if (btrfs_should_end_transaction(trans, tree_root) ||
                    (!for_reloc && btrfs_need_cleaner_sleep(root))) {
                        ret = btrfs_update_root(trans, tree_root,
                                                &root->root_key,
                                                root_item);
                        if (ret) {
                                btrfs_abort_transaction(trans, tree_root, ret);
                                err = ret;
                                goto out_end_trans;
                        }

                        btrfs_end_transaction_throttle(trans, tree_root);
                        if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
                                pr_debug("btrfs: drop snapshot early exit\n");
                                err = -EAGAIN;
                                goto out_free;
                        }

                        trans = btrfs_start_transaction(tree_root, 0);
                        if (IS_ERR(trans)) {
                                err = PTR_ERR(trans);
                                goto out_free;
                        }
                        if (block_rsv)
                                trans->block_rsv = block_rsv;
                }
        }
        btrfs_release_path(path);
        if (err)
                goto out_end_trans;

        ret = btrfs_del_root(trans, tree_root, &root->root_key);
        if (ret) {
                btrfs_abort_transaction(trans, tree_root, ret);
                goto out_end_trans;
        }

        if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
                ret = btrfs_find_root(tree_root, &root->root_key, path,
                                      NULL, NULL);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, tree_root, ret);
                        err = ret;
                        goto out_end_trans;
                } else if (ret > 0) {
                        /* if we fail to delete the orphan item this time
                         * around, it'll get picked up the next time.
                         *
                         * The most common failure here is just -ENOENT.
                         */
                        btrfs_del_orphan_item(trans, tree_root,
                                              root->root_key.objectid);
                }
        }

        if (root->in_radix) {
                btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
        } else {
                free_extent_buffer(root->node);
                free_extent_buffer(root->commit_root);
                btrfs_put_fs_root(root);
        }
        root_dropped = true;
out_end_trans:
        btrfs_end_transaction_throttle(trans, tree_root);
out_free:
        kfree(wc);
        btrfs_free_path(path);
out:
        /*
         * So if we need to stop dropping the snapshot for whatever reason we
         * need to make sure to add it back to the dead root list so that we
         * keep trying to do the work later. This also cleans up roots if we
         * don't have it in the radix (like when we recover after a power fail
         * or unmount) so we don't leak memory.
         */
        if (!for_reloc && root_dropped == false)
                btrfs_add_dead_root(root);
        if (err)
                btrfs_std_error(root->fs_info, err);
        return err;
}

/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'.
 * Only used by the relocation code.
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct extent_buffer *node,
			struct extent_buffer *parent)
{
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_locked(parent);
	parent_level = btrfs_header_level(parent);
	extent_buffer_get(parent);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->for_reloc = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}
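
/*
 * helper to pick the allocation profile a block group should use after
 * relocation: honor an ongoing restripe target if one is set, otherwise
 * reduce the profile to whatever the current writable device count can
 * sustain (e.g. raid1 -> dup on a single device).
 */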
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped;

	/*
	 * If restriping is underway for this chunk type, pick the target
	 * profile and return it; otherwise do the usual balance reduction.
	 */
	stripped = get_restripe_target(root->fs_info, flags);
	if (stripped)
		return extended_to_chunk(stripped);

	/*
	 * we add in the count of missing devices because we want
	 * to make sure that any RAID levels on a degraded FS
	 * continue to be honored.
	 */
	num_devices = root->fs_info->fs_devices->rw_devices +
		root->fs_info->fs_devices->missing_devices;

	stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* this is drive concat, leave it alone */
	}

	return flags;
}
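
/*
 * try to mark a block group read-only: fails with -ENOSPC when moving
 * the group's unused bytes out of the writable pool would leave the
 * space_info without enough room (plus a small headroom kept for
 * metadata/system groups unless @force is set).
 */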
static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	u64 min_allocable_bytes;
	int ret = -ENOSPC;

	/*
	 * We need some metadata space and system metadata space for
	 * allocating chunks in some corner cases, so keep a little
	 * headroom unless we are forced to set the group read-only.
	 */
	if ((sinfo->flags &
	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
	    !force)
		min_allocable_bytes = 1 * 1024 * 1024;
	else
		min_allocable_bytes = 0;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->ro) {
		ret = 0;
		goto out;
	}

	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);

	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
	    min_allocable_bytes <= sinfo->total_bytes) {
		sinfo->bytes_readonly += num_bytes;
		cache->ro = 1;
		ret = 0;
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return ret;
}
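
/*
 * make a block group read-only, allocating a replacement chunk first
 * when the profile changes or when the space_info is too full to absorb
 * the flip.
 *
 * A minimal caller sketch (error handling elided):
 *
 *	ret = btrfs_set_block_group_ro(extent_root, block_group);
 *	if (ret)
 *		return ret;	(the block group stays writable)
 */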
int btrfs_set_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

	BUG_ON(cache->ro);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	alloc_flags = update_block_group_flags(root, cache->flags);
	if (alloc_flags != cache->flags) {
		ret = do_chunk_alloc(trans, root, alloc_flags,
				     CHUNK_ALLOC_FORCE);
		if (ret < 0)
			goto out;
	}

	ret = set_block_group_ro(cache, 0);
	if (!ret)
		goto out;
	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
	ret = do_chunk_alloc(trans, root, alloc_flags,
			     CHUNK_ALLOC_FORCE);
	if (ret < 0)
		goto out;
	ret = set_block_group_ro(cache, 0);
out:
	btrfs_end_transaction(trans, root);
	return ret;
}
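
/*
 * force allocation of a chunk with the given type's current profile,
 * regardless of how much free space the existing chunks still have.
 */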
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 type)
{
	u64 alloc_flags = get_alloc_profile(root, type);
	return do_chunk_alloc(trans, root, alloc_flags,
			      CHUNK_ALLOC_FORCE);
}

/*
 * helper to account for the unused space of all the read-only block
 * groups in the list.  Takes mirrors into account.
 */
static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
{
	struct btrfs_block_group_cache *block_group;
	u64 free_bytes = 0;
	int factor;

	list_for_each_entry(block_group, groups_list, list) {
		spin_lock(&block_group->lock);

		if (!block_group->ro) {
			spin_unlock(&block_group->lock);
			continue;
		}

		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
					  BTRFS_BLOCK_GROUP_RAID10 |
					  BTRFS_BLOCK_GROUP_DUP))
			factor = 2;
		else
			factor = 1;

		free_bytes += (block_group->key.offset -
			       btrfs_block_group_used(&block_group->item)) *
			       factor;

		spin_unlock(&block_group->lock);
	}

	return free_bytes;
}

/*
 * helper to account for the unused space of all the read-only block
 * groups in the space_info.  Takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	int i;
	u64 free_bytes = 0;

	spin_lock(&sinfo->lock);

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		if (!list_empty(&sinfo->block_groups[i]))
			free_bytes += __btrfs_get_ro_block_group_free_space(
						&sinfo->block_groups[i]);

	spin_unlock(&sinfo->lock);

	return free_bytes;
}
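
/*
 * undo set_block_group_ro: give the group's unused bytes back to the
 * writable pool of its space_info and clear the ro flag.
 */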
void btrfs_set_block_group_rw(struct btrfs_root *root,
			      struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);
	sinfo->bytes_readonly -= num_bytes;
	cache->ro = 0;
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
}

/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if
 * it's ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	struct btrfs_trans_handle *trans;
	u64 min_free;
	u64 dev_min = 1;
	u64 dev_nr = 0;
	u64 target;
	int index;
	int full = 0;
	int ret = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);

	/* odd, couldn't find the block group, leave it alone */
	if (!block_group)
		return -1;

	min_free = btrfs_block_group_used(&block_group->item);

	/* no bytes used, we're good */
	if (!min_free)
		goto out;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);

	full = space_info->full;

	/*
	 * if this is the last block group we have in this space, we can't
	 * relocate it unless we're able to allocate a new chunk below.
	 *
	 * Otherwise, we need to make sure we have room in the space to handle
	 * all of the extents from this block group.  If we can, we're good.
	 */
	if ((space_info->total_bytes != block_group->key.offset) &&
	    (space_info->bytes_used + space_info->bytes_reserved +
	     space_info->bytes_pinned + space_info->bytes_readonly +
	     min_free < space_info->total_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * ok we don't have enough space, but maybe we have free space on our
	 * devices to allocate new chunks for relocation, so loop through our
	 * alloc devices and guess if we have enough space.  If this block
	 * group is going to be restriped, run checks against the target
	 * profile instead of the current one.
	 */
	ret = -1;

	/*
	 * index:
	 *	0: raid10
	 *	1: raid1
	 *	2: dup
	 *	3: raid0
	 *	4: single
	 */
	target = get_restripe_target(root->fs_info, block_group->flags);
	if (target) {
		index = __get_raid_index(extended_to_chunk(target));
	} else {
		/*
		 * this is just a balance, so if we were marked as full
		 * we know there is no space for a new chunk
		 */
		if (full)
			goto out;

		index = get_block_group_index(block_group);
	}

	if (index == BTRFS_RAID_RAID10) {
		dev_min = 4;
		/* Divide by 2 */
		min_free >>= 1;
	} else if (index == BTRFS_RAID_RAID1) {
		dev_min = 2;
	} else if (index == BTRFS_RAID_DUP) {
		/* Multiply by 2 */
		min_free <<= 1;
	} else if (index == BTRFS_RAID_RAID0) {
		dev_min = fs_devices->rw_devices;
		do_div(min_free, dev_min);
	}

	/* We need to do this so that we can look at pending chunks */
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	mutex_lock(&root->fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 dev_offset;

		/*
		 * check to make sure we can actually find a chunk with enough
		 * space to fit our block group in.
		 */
		if (device->total_bytes > device->bytes_used + min_free &&
		    !device->is_tgtdev_for_dev_replace) {
			ret = find_free_dev_extent(trans, device, min_free,
						   &dev_offset, NULL);
			if (!ret)
				dev_nr++;

			if (dev_nr >= dev_min)
				break;

			ret = -1;
		}
	}
	mutex_unlock(&root->fs_info->chunk_mutex);
	btrfs_end_transaction(trans, root);
out:
	btrfs_put_block_group(block_group);
	return ret;
}
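
/*
 * walk forward from @key in @root and leave @path pointing at the first
 * BLOCK_GROUP_ITEM whose objectid is >= key->objectid.  Returns 0 when
 * one is found, > 0 when the tree is exhausted, < 0 on error.
 */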
static int find_first_block_group(struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}
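
/*
 * drop the inode reference each block group may hold on its free space
 * cache inode, so the final iput happens before the block groups
 * themselves are torn down.
 */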
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = next_block_group(info->tree_root,
						       block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		iput(inode);
		last = block_group->key.objectid + block_group->key.offset;
		btrfs_put_block_group(block_group);
	}
}
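
/*
 * tear down every cached block group and space_info.  Only used in the
 * final stages of unmount, so nothing can race with us here (see the
 * comment above the space_info loop below).
 */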
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->extent_commit_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		put_caching_control(caching_ctl);
	}
	up_write(&info->extent_commit_sem);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		if (block_group->cached == BTRFS_CACHE_STARTED)
			wait_block_group_cache_done(block_group);

		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO ||
		    block_group->cached == BTRFS_CACHE_ERROR)
			free_excluded_extents(info->extent_root, block_group);

		btrfs_remove_free_space_cache(block_group);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/* now that all the block groups are freed, go through and
	 * free all the space_info structs.  This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them.  We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */
	synchronize_rcu();

	release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);
		if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
			if (WARN_ON(space_info->bytes_pinned > 0 ||
				    space_info->bytes_reserved > 0 ||
				    space_info->bytes_may_use > 0)) {
				dump_space_info(space_info, 0, 0);
			}
		}
		percpu_counter_destroy(&space_info->total_bytes_pinned);
		list_del(&space_info->list);
		kfree(space_info);
	}
	return 0;
}
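
/*
 * link a block group into the per-raid-type list of its space_info.
 */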
static void __link_block_group(struct btrfs_space_info *space_info,
			       struct btrfs_block_group_cache *cache)
{
	int index = get_block_group_index(cache);

	down_write(&space_info->groups_sem);
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);
}
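
/*
 * read every block group item out of the extent tree at mount time,
 * build the in-memory caches and space_infos, and mark un-mirrored
 * groups read-only when mirrored profiles are also present.
 */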
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int need_clear = 0;
	u64 cache_gen;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
	if (btrfs_test_opt(root, SPACE_CACHE) &&
	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(root, CLEAR_CACHE))
		need_clear = 1;

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}
		cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
						GFP_NOFS);
		if (!cache->free_space_ctl) {
			kfree(cache);
			ret = -ENOMEM;
			goto error;
		}

		atomic_set(&cache->count, 1);
		spin_lock_init(&cache->lock);
		cache->fs_info = info;
		INIT_LIST_HEAD(&cache->list);
		INIT_LIST_HEAD(&cache->cluster_list);

		if (need_clear) {
			/*
			 * When we mount with an old space cache, we need to
			 * set BTRFS_DC_CLEAR and set the dirty flag.
			 *
			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
			 *    truncate the old free space cache inode and
			 *    set up a new one.
			 * b) Setting 'dirty flag' makes sure that we flush
			 *    the new space cache info onto disk.
			 */
			cache->disk_cache_state = BTRFS_DC_CLEAR;
			if (btrfs_test_opt(root, SPACE_CACHE))
				cache->dirty = 1;
		}

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(path);
		cache->flags = btrfs_block_group_flags(&cache->item);
		cache->sectorsize = root->sectorsize;
		cache->full_stripe_len = btrfs_full_stripe_len(root,
					       &root->fs_info->mapping_tree,
					       found_key.objectid);
		btrfs_init_free_space_ctl(cache);

		/*
		 * We need to exclude the super stripes now so that the space
		 * info has super bytes accounted for, otherwise we'll think
		 * we have more space than we actually do.
		 */
		ret = exclude_super_stripes(root, cache);
		if (ret) {
			/*
			 * We may have excluded something, so call this just in
			 * case.
			 */
			free_excluded_extents(root, cache);
			kfree(cache->free_space_ctl);
			kfree(cache);
			goto error;
		}

		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us a lot of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			free_excluded_extents(root, cache);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
			free_excluded_extents(root, cache);
		}

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		if (ret) {
			btrfs_remove_free_space_cache(cache);
			btrfs_put_block_group(cache);
			goto error;
		}

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		if (ret) {
			btrfs_remove_free_space_cache(cache);
			spin_lock(&info->block_group_cache_lock);
			rb_erase(&cache->cache_node,
				 &info->block_group_cache_tree);
			spin_unlock(&info->block_group_cache_lock);
			btrfs_put_block_group(cache);
			goto error;
		}

		cache->space_info = space_info;
		spin_lock(&cache->space_info->lock);
		cache->space_info->bytes_readonly += cache->bytes_super;
		spin_unlock(&cache->space_info->lock);

		__link_block_group(space_info, cache);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_ro(cache, 1);
	}

	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
		if (!(get_alloc_profile(root, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID5 |
		       BTRFS_BLOCK_GROUP_RAID6 |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * avoid allocating from un-mirrored block group if there are
		 * mirrored block groups.
		 */
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_RAID0],
				list)
			set_block_group_ro(cache, 1);
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_SINGLE],
				list)
			set_block_group_ro(cache, 1);
	}

	init_global_block_rsv(info);
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
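
/*
 * insert the block group items for any block groups created during this
 * transaction (queued on trans->new_bgs by btrfs_make_block_group below)
 * into the extent tree, and finish the corresponding chunk allocations.
 */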
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	struct btrfs_block_group_cache *block_group, *tmp;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_block_group_item item;
	struct btrfs_key key;
	int ret = 0;

	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
				 new_bg_list) {
		list_del_init(&block_group->new_bg_list);

		if (ret)
			continue;

		spin_lock(&block_group->lock);
		memcpy(&item, &block_group->item, sizeof(item));
		memcpy(&key, &block_group->key, sizeof(key));
		spin_unlock(&block_group->lock);

		ret = btrfs_insert_item(trans, extent_root, &key, &item,
					sizeof(item));
		if (ret)
			btrfs_abort_transaction(trans, extent_root, ret);
		ret = btrfs_finish_chunk_alloc(trans, extent_root,
					       key.objectid, key.offset);
		if (ret)
			btrfs_abort_transaction(trans, extent_root, ret);
	}
}
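
/*
 * create the in-memory block group for a freshly allocated chunk.  The
 * on-disk item is not inserted here; the group is queued on
 * trans->new_bgs and written out by btrfs_create_pending_block_groups().
 */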
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_log_full_commit = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return -ENOMEM;
	}

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->sectorsize = root->sectorsize;
	cache->fs_info = root->fs_info;
	cache->full_stripe_len = btrfs_full_stripe_len(root,
					       &root->fs_info->mapping_tree,
					       chunk_offset);

	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->new_bg_list);

	btrfs_init_free_space_ctl(cache);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	ret = exclude_super_stripes(root, cache);
	if (ret) {
		/*
		 * We may have excluded something, so call this just in
		 * case.
		 */
		free_excluded_extents(root, cache);
		kfree(cache->free_space_ctl);
		kfree(cache);
		return ret;
	}

	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	free_excluded_extents(root, cache);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		spin_lock(&root->fs_info->block_group_cache_lock);
		rb_erase(&cache->cache_node,
			 &root->fs_info->block_group_cache_tree);
		spin_unlock(&root->fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return ret;
	}
	update_global_block_rsv(root->fs_info);

	spin_lock(&cache->space_info->lock);
	cache->space_info->bytes_readonly += cache->bytes_super;
	spin_unlock(&cache->space_info->lock);

	__link_block_group(cache->space_info, cache);

	list_add_tail(&cache->new_bg_list, &trans->new_bgs);

	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}
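
/*
 * remove the extended profile bits of @flags from the per-type masks of
 * available allocation profiles; called once the last block group of a
 * given kind is gone.
 */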
static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}
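
/*
 * remove an empty, read-only block group: drop its free space cache
 * inode, unhook it from the in-memory trees, lists and space_info
 * accounting, and finally delete its item from the extent tree.
 */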
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	int ret;
	int index;
	int factor;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	free_excluded_extents(root, block_group);

	memcpy(&key, &block_group->key, sizeof(key));
	index = get_block_group_index(block_group);
	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
				  BTRFS_BLOCK_GROUP_RAID1 |
				  BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	inode = lookup_free_space_inode(tree_root, block_group, path);
	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, inode);
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block group's ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);

	if (root->fs_info->first_logical_byte == block_group->key.objectid)
		root->fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index]))
		clear_avail_alloc_bits(root->fs_info, block_group->flags);
	up_write(&block_group->space_info->groups_sem);

	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	block_group->space_info->disk_total -= block_group->key.offset * factor;
	spin_unlock(&block_group->space_info->lock);

	memcpy(&key, &block_group->key, sizeof(key));

	btrfs_clear_space_info_full(root->fs_info);
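
	/*
	 * drop both remaining references: the one taken by the
	 * btrfs_lookup_block_group() call above and the one the block
	 * group cache tree held until the rb_erase() earlier.
	 */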
	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}
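
/*
 * create the initial (empty) space_infos: SYSTEM plus either a combined
 * METADATA|DATA info on mixed-group filesystems or separate METADATA
 * and DATA infos otherwise.
 */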
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return 1;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	}
out:
	return ret;
}
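
/*
 * thin wrappers exposing unpin_extent_range() and btrfs_discard_extent()
 * to the transaction-abort/error cleanup paths.
 */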
int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	return unpin_extent_range(root, start, end);
}

int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
			       u64 num_bytes, u64 *actual_bytes)
{
	return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
}
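
/*
 * discard the free space of every block group overlapping the requested
 * range; range->len is updated to the number of bytes actually trimmed.
 *
 * A rough caller sketch, roughly what an FITRIM handler would do
 * (device minlen clamping and permission checks elided):
 *
 *	struct fstrim_range range = {
 *		.start	= 0,
 *		.len	= (u64)-1,
 *		.minlen	= 0,
 *	};
 *	ret = btrfs_trim_fs(root, &range);
 */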
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	u64 group_trimmed;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret = 0;

	/*
	 * When trying to trim all of the FS space, our first block group
	 * may not start at offset zero, so look up the first group at or
	 * after range->start.
	 */
	if (range->len == total_bytes)
		cache = btrfs_lookup_first_block_group(fs_info, range->start);
	else
		cache = btrfs_lookup_block_group(fs_info, range->start);

	while (cache) {
		if (cache->key.objectid >= (range->start + range->len)) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->key.objectid);
		end = min(range->start + range->len,
				cache->key.objectid + cache->key.offset);

		if (end - start >= range->minlen) {
			if (!block_group_cache_done(cache)) {
				ret = cache_block_group(cache, 0);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
				ret = wait_block_group_cache_done(cache);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				btrfs_put_block_group(cache);
				break;
			}
		}

		cache = next_block_group(fs_info->tree_root, cache);
	}

	range->len = trimmed;
	return ret;
}