extent-tree.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 root_objectid, u64 owner_objectid,
                               u64 owner_offset, int refs_to_drop,
                               struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
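
/*
 * Has the free-space caching of this block group finished?  The smp_mb()
 * is presumably there so that a caller that sees BTRFS_CACHE_FINISHED
 * also sees the free-space entries the caching thread published before
 * setting that state.
 */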
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}
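
/*
 * Block group reference counting.  btrfs_get_block_group() takes a
 * reference, btrfs_put_block_group() drops one and, on the final put,
 * frees the structure along with its free_space_ctl.  The WARN_ONs catch
 * groups that are freed while they still have pinned or reserved bytes
 * outstanding.
 */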
static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                       struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}
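
/*
 * Excluded extents are ranges (the superblock stripes below) that the
 * allocator must never hand out.  They are tracked as EXTENT_UPTODATE
 * bits in both freed_extents trees: add_excluded_extent() marks one
 * range, free_excluded_extents() clears the whole block group once
 * caching no longer needs the markers.
 */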
static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}
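
/*
 * Account for the superblock mirrors that fall inside this block group:
 * every such stripe is added to bytes_super and excluded so it is never
 * treated as free space.
 */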
static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                BUG_ON(ret); /* -ENOMEM */
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret); /* -ENOMEM */

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        BUG_ON(ret); /* -ENOMEM */
                }

                kfree(logical);
        }
        return 0;
}
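
/*
 * Take a reference on the caching control of a block group.  Returns
 * NULL when no slow caching thread is running: either the group is not
 * in the BTRFS_CACHE_STARTED state, or it is being loaded the fast way
 * and has no caching_ctl attached.  Callers pair this with
 * put_caching_control(), which frees the structure on the final drop.
 */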
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
 * since their free space will be released as soon as the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}
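
/*
 * Worker that fills in the free-space cache for a block group the slow
 * way: walk the extent tree via the commit root (so no tree locks are
 * needed) and feed every gap between allocated extents into
 * add_new_free_space().  Waiters on caching_ctl->wait are woken for
 * every ~2MB of free space found, so allocations can proceed before the
 * whole group has been cached.
 */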
static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            btrfs_next_leaf(extent_root, path)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}
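
/*
 * Start caching the free space of a block group.  When the space cache
 * mount option is enabled we first try the fast path and fill the cache
 * from the on-disk free-space cache via load_free_space_cache(); only if
 * that fails, and load_cache_only is not set, do we queue
 * caching_thread() to rebuild the cache from the extent tree.
 */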
static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        caching_ctl->work.func = caching_thread;

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but this could happen I think in the
         * case where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this block
         * group, but if we've moved to the state where we will wait on caching
         * block groups we need to first check if we're doing a fast load here,
         * so we can wait for it to finish, otherwise we could end up allocating
         * from a block group whose cache gets evicted for one reason or
         * another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->extent_commit_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}
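
/*
 * Find the space_info whose flags match the given allocation flags
 * (data, metadata or system); the space_info list is walked under RCU.
 */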
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}
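
/*
 * Scan the metadata block groups starting at the hint and return the
 * start of the first one that is less than ~90% used (div_factor with
 * factor 9).  If nothing fits, wrap around to search_start; as a last
 * resort, retry with the threshold relaxed (factor 10) and read-only
 * groups included.
 */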
u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}
  630. /*
  631. * helper function to lookup reference count and flags of extent.
  632. *
  633. * the head node for delayed ref is used to store the sum of all the
  634. * reference count modifications queued up in the rbtree. the head
  635. * node may also store the extent flags to set. This way you can check
  636. * to see what the reference count and extent flags would be if all of
  637. * the delayed refs are not processed.
  638. */
  639. int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
  640. struct btrfs_root *root, u64 bytenr,
  641. u64 num_bytes, u64 *refs, u64 *flags)
  642. {
  643. struct btrfs_delayed_ref_head *head;
  644. struct btrfs_delayed_ref_root *delayed_refs;
  645. struct btrfs_path *path;
  646. struct btrfs_extent_item *ei;
  647. struct extent_buffer *leaf;
  648. struct btrfs_key key;
  649. u32 item_size;
  650. u64 num_refs;
  651. u64 extent_flags;
  652. int ret;
  653. path = btrfs_alloc_path();
  654. if (!path)
  655. return -ENOMEM;
  656. key.objectid = bytenr;
  657. key.type = BTRFS_EXTENT_ITEM_KEY;
  658. key.offset = num_bytes;
  659. if (!trans) {
  660. path->skip_locking = 1;
  661. path->search_commit_root = 1;
  662. }
  663. again:
  664. ret = btrfs_search_slot(trans, root->fs_info->extent_root,
  665. &key, path, 0, 0);
  666. if (ret < 0)
  667. goto out_free;
  668. if (ret == 0) {
  669. leaf = path->nodes[0];
  670. item_size = btrfs_item_size_nr(leaf, path->slots[0]);
  671. if (item_size >= sizeof(*ei)) {
  672. ei = btrfs_item_ptr(leaf, path->slots[0],
  673. struct btrfs_extent_item);
  674. num_refs = btrfs_extent_refs(leaf, ei);
  675. extent_flags = btrfs_extent_flags(leaf, ei);
  676. } else {
  677. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  678. struct btrfs_extent_item_v0 *ei0;
  679. BUG_ON(item_size != sizeof(*ei0));
  680. ei0 = btrfs_item_ptr(leaf, path->slots[0],
  681. struct btrfs_extent_item_v0);
  682. num_refs = btrfs_extent_refs_v0(leaf, ei0);
  683. /* FIXME: this isn't correct for data */
  684. extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
  685. #else
  686. BUG();
  687. #endif
  688. }
  689. BUG_ON(num_refs == 0);
  690. } else {
  691. num_refs = 0;
  692. extent_flags = 0;
  693. ret = 0;
  694. }
  695. if (!trans)
  696. goto out;
  697. delayed_refs = &trans->transaction->delayed_refs;
  698. spin_lock(&delayed_refs->lock);
  699. head = btrfs_find_delayed_ref_head(trans, bytenr);
  700. if (head) {
  701. if (!mutex_trylock(&head->mutex)) {
  702. atomic_inc(&head->node.refs);
  703. spin_unlock(&delayed_refs->lock);
  704. btrfs_release_path(path);
  705. /*
  706. * Mutex was contended, block until it's released and try
  707. * again
  708. */
  709. mutex_lock(&head->mutex);
  710. mutex_unlock(&head->mutex);
  711. btrfs_put_delayed_ref(&head->node);
  712. goto again;
  713. }
  714. if (head->extent_op && head->extent_op->update_flags)
  715. extent_flags |= head->extent_op->flags_to_set;
  716. else
  717. BUG_ON(num_refs == 0);
  718. num_refs += head->node.ref_mod;
  719. mutex_unlock(&head->mutex);
  720. }
  721. spin_unlock(&delayed_refs->lock);
  722. out:
  723. WARN_ON(num_refs == 0);
  724. if (refs)
  725. *refs = num_refs;
  726. if (flags)
  727. *flags = extent_flags;
  728. out_free:
  729. btrfs_free_path(path);
  730. return ret;
  731. }
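/*
 * Illustrative sketch, not part of the original source: reading the
 * combined (on-disk + delayed) reference count and flags of an extent.
 * The 'example_' name is hypothetical; passing trans == NULL makes the
 * lookup use the commit root without locking, as the function above does.
 */
#if 0
static int example_dump_refs(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 bytenr, u64 num_bytes)
{
	u64 refs = 0;
	u64 flags = 0;
	int ret;

	ret = btrfs_lookup_extent_info(trans, root, bytenr, num_bytes,
				       &refs, &flags);
	if (ret)
		return ret;
	printk(KERN_DEBUG "btrfs: extent %llu refs %llu flags 0x%llx\n",
	       (unsigned long long)bytenr, (unsigned long long)refs,
	       (unsigned long long)flags);
	return 0;
}
#endif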
/*
 * Back reference rules. Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance. This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually the full back refs are generic, and
 * can be used in all the cases where the implicit back refs are used. The
 * major shortcoming of the full back refs is their overhead. Every time a
 * tree block gets COWed, we have to update the back refs entry for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * The meaning of the key offset differs between the types of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in
 * the tree block info structure.
 */
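/*
 * Illustrative sketch, not part of the original source: how the two
 * kinds of data back ref keys described above are composed. All the
 * concrete numbers are made up; only the key layout follows the rules
 * in the comment above.
 */
#if 0
static void example_compose_backref_keys(void)
{
	struct btrfs_key implicit;
	struct btrfs_key full;

	/* implicit back ref: offset is the hash of (root, inode, offset) */
	implicit.objectid = 136708096;		/* extent bytenr */
	implicit.type = BTRFS_EXTENT_DATA_REF_KEY;
	implicit.offset = hash_extent_data_ref(BTRFS_FS_TREE_OBJECTID,
					       257, 0);

	/* full back ref: offset is the bytenr of the referencing leaf */
	full.objectid = 136708096;		/* extent bytenr */
	full.type = BTRFS_SHARED_DATA_REF_KEY;
	full.offset = 30408704;			/* parent leaf bytenr */
}
#endif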
  837. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  838. static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
  839. struct btrfs_root *root,
  840. struct btrfs_path *path,
  841. u64 owner, u32 extra_size)
  842. {
  843. struct btrfs_extent_item *item;
  844. struct btrfs_extent_item_v0 *ei0;
  845. struct btrfs_extent_ref_v0 *ref0;
  846. struct btrfs_tree_block_info *bi;
  847. struct extent_buffer *leaf;
  848. struct btrfs_key key;
  849. struct btrfs_key found_key;
  850. u32 new_size = sizeof(*item);
  851. u64 refs;
  852. int ret;
  853. leaf = path->nodes[0];
  854. BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
  855. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  856. ei0 = btrfs_item_ptr(leaf, path->slots[0],
  857. struct btrfs_extent_item_v0);
  858. refs = btrfs_extent_refs_v0(leaf, ei0);
  859. if (owner == (u64)-1) {
  860. while (1) {
  861. if (path->slots[0] >= btrfs_header_nritems(leaf)) {
  862. ret = btrfs_next_leaf(root, path);
  863. if (ret < 0)
  864. return ret;
  865. BUG_ON(ret > 0); /* Corruption */
  866. leaf = path->nodes[0];
  867. }
  868. btrfs_item_key_to_cpu(leaf, &found_key,
  869. path->slots[0]);
  870. BUG_ON(key.objectid != found_key.objectid);
  871. if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
  872. path->slots[0]++;
  873. continue;
  874. }
  875. ref0 = btrfs_item_ptr(leaf, path->slots[0],
  876. struct btrfs_extent_ref_v0);
  877. owner = btrfs_ref_objectid_v0(leaf, ref0);
  878. break;
  879. }
  880. }
  881. btrfs_release_path(path);
  882. if (owner < BTRFS_FIRST_FREE_OBJECTID)
  883. new_size += sizeof(*bi);
  884. new_size -= sizeof(*ei0);
  885. ret = btrfs_search_slot(trans, root, &key, path,
  886. new_size + extra_size, 1);
  887. if (ret < 0)
  888. return ret;
  889. BUG_ON(ret); /* Corruption */
  890. btrfs_extend_item(trans, root, path, new_size);
  891. leaf = path->nodes[0];
  892. item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
  893. btrfs_set_extent_refs(leaf, item, refs);
  894. /* FIXME: get real generation */
  895. btrfs_set_extent_generation(leaf, item, 0);
  896. if (owner < BTRFS_FIRST_FREE_OBJECTID) {
  897. btrfs_set_extent_flags(leaf, item,
  898. BTRFS_EXTENT_FLAG_TREE_BLOCK |
  899. BTRFS_BLOCK_FLAG_FULL_BACKREF);
  900. bi = (struct btrfs_tree_block_info *)(item + 1);
  901. /* FIXME: get first key of the block */
  902. memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
  903. btrfs_set_tree_block_level(leaf, bi, (int)owner);
  904. } else {
  905. btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
  906. }
  907. btrfs_mark_buffer_dirty(leaf);
  908. return 0;
  909. }
  910. #endif
  911. static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
  912. {
  913. u32 high_crc = ~(u32)0;
  914. u32 low_crc = ~(u32)0;
  915. __le64 lenum;
  916. lenum = cpu_to_le64(root_objectid);
  917. high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
  918. lenum = cpu_to_le64(owner);
  919. low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
  920. lenum = cpu_to_le64(offset);
  921. low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
  922. return ((u64)high_crc << 31) ^ (u64)low_crc;
  923. }
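/*
 * Note on the hash layout (added commentary): the root objectid is
 * hashed into high_crc and the (owner, offset) pair into low_crc, so
 * back refs from the same root tend to land near each other in key
 * order. The shift is 31, not 32, so bit 31 of the two halves overlaps
 * in the XOR; presumably this is kept as-is for on-disk compatibility,
 * since the hash is stored in back ref keys.
 */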
  924. static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
  925. struct btrfs_extent_data_ref *ref)
  926. {
  927. return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
  928. btrfs_extent_data_ref_objectid(leaf, ref),
  929. btrfs_extent_data_ref_offset(leaf, ref));
  930. }
  931. static int match_extent_data_ref(struct extent_buffer *leaf,
  932. struct btrfs_extent_data_ref *ref,
  933. u64 root_objectid, u64 owner, u64 offset)
  934. {
  935. if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
  936. btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
  937. btrfs_extent_data_ref_offset(leaf, ref) != offset)
  938. return 0;
  939. return 1;
  940. }
  941. static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
  942. struct btrfs_root *root,
  943. struct btrfs_path *path,
  944. u64 bytenr, u64 parent,
  945. u64 root_objectid,
  946. u64 owner, u64 offset)
  947. {
  948. struct btrfs_key key;
  949. struct btrfs_extent_data_ref *ref;
  950. struct extent_buffer *leaf;
  951. u32 nritems;
  952. int ret;
  953. int recow;
  954. int err = -ENOENT;
  955. key.objectid = bytenr;
  956. if (parent) {
  957. key.type = BTRFS_SHARED_DATA_REF_KEY;
  958. key.offset = parent;
  959. } else {
  960. key.type = BTRFS_EXTENT_DATA_REF_KEY;
  961. key.offset = hash_extent_data_ref(root_objectid,
  962. owner, offset);
  963. }
  964. again:
  965. recow = 0;
  966. ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
  967. if (ret < 0) {
  968. err = ret;
  969. goto fail;
  970. }
  971. if (parent) {
  972. if (!ret)
  973. return 0;
  974. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  975. key.type = BTRFS_EXTENT_REF_V0_KEY;
  976. btrfs_release_path(path);
  977. ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
  978. if (ret < 0) {
  979. err = ret;
  980. goto fail;
  981. }
  982. if (!ret)
  983. return 0;
  984. #endif
  985. goto fail;
  986. }
  987. leaf = path->nodes[0];
  988. nritems = btrfs_header_nritems(leaf);
  989. while (1) {
  990. if (path->slots[0] >= nritems) {
  991. ret = btrfs_next_leaf(root, path);
  992. if (ret < 0)
  993. err = ret;
  994. if (ret)
  995. goto fail;
  996. leaf = path->nodes[0];
  997. nritems = btrfs_header_nritems(leaf);
  998. recow = 1;
  999. }
  1000. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  1001. if (key.objectid != bytenr ||
  1002. key.type != BTRFS_EXTENT_DATA_REF_KEY)
  1003. goto fail;
  1004. ref = btrfs_item_ptr(leaf, path->slots[0],
  1005. struct btrfs_extent_data_ref);
  1006. if (match_extent_data_ref(leaf, ref, root_objectid,
  1007. owner, offset)) {
  1008. if (recow) {
  1009. btrfs_release_path(path);
  1010. goto again;
  1011. }
  1012. err = 0;
  1013. break;
  1014. }
  1015. path->slots[0]++;
  1016. }
  1017. fail:
  1018. return err;
  1019. }
  1020. static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
  1021. struct btrfs_root *root,
  1022. struct btrfs_path *path,
  1023. u64 bytenr, u64 parent,
  1024. u64 root_objectid, u64 owner,
  1025. u64 offset, int refs_to_add)
  1026. {
  1027. struct btrfs_key key;
  1028. struct extent_buffer *leaf;
  1029. u32 size;
  1030. u32 num_refs;
  1031. int ret;
  1032. key.objectid = bytenr;
  1033. if (parent) {
  1034. key.type = BTRFS_SHARED_DATA_REF_KEY;
  1035. key.offset = parent;
  1036. size = sizeof(struct btrfs_shared_data_ref);
  1037. } else {
  1038. key.type = BTRFS_EXTENT_DATA_REF_KEY;
  1039. key.offset = hash_extent_data_ref(root_objectid,
  1040. owner, offset);
  1041. size = sizeof(struct btrfs_extent_data_ref);
  1042. }
  1043. ret = btrfs_insert_empty_item(trans, root, path, &key, size);
  1044. if (ret && ret != -EEXIST)
  1045. goto fail;
  1046. leaf = path->nodes[0];
  1047. if (parent) {
  1048. struct btrfs_shared_data_ref *ref;
  1049. ref = btrfs_item_ptr(leaf, path->slots[0],
  1050. struct btrfs_shared_data_ref);
  1051. if (ret == 0) {
  1052. btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
  1053. } else {
  1054. num_refs = btrfs_shared_data_ref_count(leaf, ref);
  1055. num_refs += refs_to_add;
  1056. btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
  1057. }
  1058. } else {
  1059. struct btrfs_extent_data_ref *ref;
  1060. while (ret == -EEXIST) {
  1061. ref = btrfs_item_ptr(leaf, path->slots[0],
  1062. struct btrfs_extent_data_ref);
  1063. if (match_extent_data_ref(leaf, ref, root_objectid,
  1064. owner, offset))
  1065. break;
  1066. btrfs_release_path(path);
  1067. key.offset++;
  1068. ret = btrfs_insert_empty_item(trans, root, path, &key,
  1069. size);
  1070. if (ret && ret != -EEXIST)
  1071. goto fail;
  1072. leaf = path->nodes[0];
  1073. }
  1074. ref = btrfs_item_ptr(leaf, path->slots[0],
  1075. struct btrfs_extent_data_ref);
  1076. if (ret == 0) {
  1077. btrfs_set_extent_data_ref_root(leaf, ref,
  1078. root_objectid);
  1079. btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
  1080. btrfs_set_extent_data_ref_offset(leaf, ref, offset);
  1081. btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
  1082. } else {
  1083. num_refs = btrfs_extent_data_ref_count(leaf, ref);
  1084. num_refs += refs_to_add;
  1085. btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
  1086. }
  1087. }
  1088. btrfs_mark_buffer_dirty(leaf);
  1089. ret = 0;
  1090. fail:
  1091. btrfs_release_path(path);
  1092. return ret;
  1093. }
  1094. static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
  1095. struct btrfs_root *root,
  1096. struct btrfs_path *path,
  1097. int refs_to_drop)
  1098. {
  1099. struct btrfs_key key;
  1100. struct btrfs_extent_data_ref *ref1 = NULL;
  1101. struct btrfs_shared_data_ref *ref2 = NULL;
  1102. struct extent_buffer *leaf;
  1103. u32 num_refs = 0;
  1104. int ret = 0;
  1105. leaf = path->nodes[0];
  1106. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  1107. if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
  1108. ref1 = btrfs_item_ptr(leaf, path->slots[0],
  1109. struct btrfs_extent_data_ref);
  1110. num_refs = btrfs_extent_data_ref_count(leaf, ref1);
  1111. } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
  1112. ref2 = btrfs_item_ptr(leaf, path->slots[0],
  1113. struct btrfs_shared_data_ref);
  1114. num_refs = btrfs_shared_data_ref_count(leaf, ref2);
  1115. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  1116. } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
  1117. struct btrfs_extent_ref_v0 *ref0;
  1118. ref0 = btrfs_item_ptr(leaf, path->slots[0],
  1119. struct btrfs_extent_ref_v0);
  1120. num_refs = btrfs_ref_count_v0(leaf, ref0);
  1121. #endif
  1122. } else {
  1123. BUG();
  1124. }
  1125. BUG_ON(num_refs < refs_to_drop);
  1126. num_refs -= refs_to_drop;
  1127. if (num_refs == 0) {
  1128. ret = btrfs_del_item(trans, root, path);
  1129. } else {
  1130. if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
  1131. btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
  1132. else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
  1133. btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
  1134. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  1135. else {
  1136. struct btrfs_extent_ref_v0 *ref0;
  1137. ref0 = btrfs_item_ptr(leaf, path->slots[0],
  1138. struct btrfs_extent_ref_v0);
  1139. btrfs_set_ref_count_v0(leaf, ref0, num_refs);
  1140. }
  1141. #endif
  1142. btrfs_mark_buffer_dirty(leaf);
  1143. }
  1144. return ret;
  1145. }
  1146. static noinline u32 extent_data_ref_count(struct btrfs_root *root,
  1147. struct btrfs_path *path,
  1148. struct btrfs_extent_inline_ref *iref)
  1149. {
  1150. struct btrfs_key key;
  1151. struct extent_buffer *leaf;
  1152. struct btrfs_extent_data_ref *ref1;
  1153. struct btrfs_shared_data_ref *ref2;
  1154. u32 num_refs = 0;
  1155. leaf = path->nodes[0];
  1156. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  1157. if (iref) {
  1158. if (btrfs_extent_inline_ref_type(leaf, iref) ==
  1159. BTRFS_EXTENT_DATA_REF_KEY) {
  1160. ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
  1161. num_refs = btrfs_extent_data_ref_count(leaf, ref1);
  1162. } else {
  1163. ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
  1164. num_refs = btrfs_shared_data_ref_count(leaf, ref2);
  1165. }
  1166. } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
  1167. ref1 = btrfs_item_ptr(leaf, path->slots[0],
  1168. struct btrfs_extent_data_ref);
  1169. num_refs = btrfs_extent_data_ref_count(leaf, ref1);
  1170. } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
  1171. ref2 = btrfs_item_ptr(leaf, path->slots[0],
  1172. struct btrfs_shared_data_ref);
  1173. num_refs = btrfs_shared_data_ref_count(leaf, ref2);
  1174. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  1175. } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
  1176. struct btrfs_extent_ref_v0 *ref0;
  1177. ref0 = btrfs_item_ptr(leaf, path->slots[0],
  1178. struct btrfs_extent_ref_v0);
  1179. num_refs = btrfs_ref_count_v0(leaf, ref0);
  1180. #endif
  1181. } else {
  1182. WARN_ON(1);
  1183. }
  1184. return num_refs;
  1185. }
  1186. static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
  1187. struct btrfs_root *root,
  1188. struct btrfs_path *path,
  1189. u64 bytenr, u64 parent,
  1190. u64 root_objectid)
  1191. {
  1192. struct btrfs_key key;
  1193. int ret;
  1194. key.objectid = bytenr;
  1195. if (parent) {
  1196. key.type = BTRFS_SHARED_BLOCK_REF_KEY;
  1197. key.offset = parent;
  1198. } else {
  1199. key.type = BTRFS_TREE_BLOCK_REF_KEY;
  1200. key.offset = root_objectid;
  1201. }
  1202. ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
  1203. if (ret > 0)
  1204. ret = -ENOENT;
  1205. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  1206. if (ret == -ENOENT && parent) {
  1207. btrfs_release_path(path);
  1208. key.type = BTRFS_EXTENT_REF_V0_KEY;
  1209. ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
  1210. if (ret > 0)
  1211. ret = -ENOENT;
  1212. }
  1213. #endif
  1214. return ret;
  1215. }
  1216. static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
  1217. struct btrfs_root *root,
  1218. struct btrfs_path *path,
  1219. u64 bytenr, u64 parent,
  1220. u64 root_objectid)
  1221. {
  1222. struct btrfs_key key;
  1223. int ret;
  1224. key.objectid = bytenr;
  1225. if (parent) {
  1226. key.type = BTRFS_SHARED_BLOCK_REF_KEY;
  1227. key.offset = parent;
  1228. } else {
  1229. key.type = BTRFS_TREE_BLOCK_REF_KEY;
  1230. key.offset = root_objectid;
  1231. }
  1232. ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
  1233. btrfs_release_path(path);
  1234. return ret;
  1235. }
  1236. static inline int extent_ref_type(u64 parent, u64 owner)
  1237. {
  1238. int type;
  1239. if (owner < BTRFS_FIRST_FREE_OBJECTID) {
  1240. if (parent > 0)
  1241. type = BTRFS_SHARED_BLOCK_REF_KEY;
  1242. else
  1243. type = BTRFS_TREE_BLOCK_REF_KEY;
  1244. } else {
  1245. if (parent > 0)
  1246. type = BTRFS_SHARED_DATA_REF_KEY;
  1247. else
  1248. type = BTRFS_EXTENT_DATA_REF_KEY;
  1249. }
  1250. return type;
  1251. }
  1252. static int find_next_key(struct btrfs_path *path, int level,
  1253. struct btrfs_key *key)
  1254. {
  1255. for (; level < BTRFS_MAX_LEVEL; level++) {
  1256. if (!path->nodes[level])
  1257. break;
  1258. if (path->slots[level] + 1 >=
  1259. btrfs_header_nritems(path->nodes[level]))
  1260. continue;
  1261. if (level == 0)
  1262. btrfs_item_key_to_cpu(path->nodes[level], key,
  1263. path->slots[level] + 1);
  1264. else
  1265. btrfs_node_key_to_cpu(path->nodes[level], key,
  1266. path->slots[level] + 1);
  1267. return 0;
  1268. }
  1269. return 1;
  1270. }
/*
 * look for an inline back ref. if the back ref is found, *ref_ret is
 * set to the address of the inline back ref, and 0 is returned.
 *
 * if the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 * items in the tree are ordered.
 */
  1284. static noinline_for_stack
  1285. int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
  1286. struct btrfs_root *root,
  1287. struct btrfs_path *path,
  1288. struct btrfs_extent_inline_ref **ref_ret,
  1289. u64 bytenr, u64 num_bytes,
  1290. u64 parent, u64 root_objectid,
  1291. u64 owner, u64 offset, int insert)
  1292. {
  1293. struct btrfs_key key;
  1294. struct extent_buffer *leaf;
  1295. struct btrfs_extent_item *ei;
  1296. struct btrfs_extent_inline_ref *iref;
  1297. u64 flags;
  1298. u64 item_size;
  1299. unsigned long ptr;
  1300. unsigned long end;
  1301. int extra_size;
  1302. int type;
  1303. int want;
  1304. int ret;
  1305. int err = 0;
  1306. key.objectid = bytenr;
  1307. key.type = BTRFS_EXTENT_ITEM_KEY;
  1308. key.offset = num_bytes;
  1309. want = extent_ref_type(parent, owner);
  1310. if (insert) {
  1311. extra_size = btrfs_extent_inline_ref_size(want);
  1312. path->keep_locks = 1;
  1313. } else
  1314. extra_size = -1;
  1315. ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
  1316. if (ret < 0) {
  1317. err = ret;
  1318. goto out;
  1319. }
  1320. if (ret && !insert) {
  1321. err = -ENOENT;
  1322. goto out;
  1323. } else if (ret) {
  1324. err = -EIO;
  1325. WARN_ON(1);
  1326. goto out;
  1327. }
  1328. leaf = path->nodes[0];
  1329. item_size = btrfs_item_size_nr(leaf, path->slots[0]);
  1330. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  1331. if (item_size < sizeof(*ei)) {
  1332. if (!insert) {
  1333. err = -ENOENT;
  1334. goto out;
  1335. }
  1336. ret = convert_extent_item_v0(trans, root, path, owner,
  1337. extra_size);
  1338. if (ret < 0) {
  1339. err = ret;
  1340. goto out;
  1341. }
  1342. leaf = path->nodes[0];
  1343. item_size = btrfs_item_size_nr(leaf, path->slots[0]);
  1344. }
  1345. #endif
  1346. BUG_ON(item_size < sizeof(*ei));
  1347. ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
  1348. flags = btrfs_extent_flags(leaf, ei);
  1349. ptr = (unsigned long)(ei + 1);
  1350. end = (unsigned long)ei + item_size;
  1351. if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
  1352. ptr += sizeof(struct btrfs_tree_block_info);
  1353. BUG_ON(ptr > end);
  1354. } else {
  1355. BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
  1356. }
  1357. err = -ENOENT;
  1358. while (1) {
  1359. if (ptr >= end) {
  1360. WARN_ON(ptr > end);
  1361. break;
  1362. }
  1363. iref = (struct btrfs_extent_inline_ref *)ptr;
  1364. type = btrfs_extent_inline_ref_type(leaf, iref);
  1365. if (want < type)
  1366. break;
  1367. if (want > type) {
  1368. ptr += btrfs_extent_inline_ref_size(type);
  1369. continue;
  1370. }
  1371. if (type == BTRFS_EXTENT_DATA_REF_KEY) {
  1372. struct btrfs_extent_data_ref *dref;
  1373. dref = (struct btrfs_extent_data_ref *)(&iref->offset);
  1374. if (match_extent_data_ref(leaf, dref, root_objectid,
  1375. owner, offset)) {
  1376. err = 0;
  1377. break;
  1378. }
  1379. if (hash_extent_data_ref_item(leaf, dref) <
  1380. hash_extent_data_ref(root_objectid, owner, offset))
  1381. break;
  1382. } else {
  1383. u64 ref_offset;
  1384. ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
  1385. if (parent > 0) {
  1386. if (parent == ref_offset) {
  1387. err = 0;
  1388. break;
  1389. }
  1390. if (ref_offset < parent)
  1391. break;
  1392. } else {
  1393. if (root_objectid == ref_offset) {
  1394. err = 0;
  1395. break;
  1396. }
  1397. if (ref_offset < root_objectid)
  1398. break;
  1399. }
  1400. }
  1401. ptr += btrfs_extent_inline_ref_size(type);
  1402. }
  1403. if (err == -ENOENT && insert) {
  1404. if (item_size + extra_size >=
  1405. BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
  1406. err = -EAGAIN;
  1407. goto out;
  1408. }
/*
 * To add a new inline back ref, we have to make sure
 * there is no corresponding back ref item.
 * For simplicity, we just do not add a new inline back
 * ref if there is any kind of item for this block.
 */
  1415. if (find_next_key(path, 0, &key) == 0 &&
  1416. key.objectid == bytenr &&
  1417. key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
  1418. err = -EAGAIN;
  1419. goto out;
  1420. }
  1421. }
  1422. *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
  1423. out:
  1424. if (insert) {
  1425. path->keep_locks = 0;
  1426. btrfs_unlock_up_safe(path, 1);
  1427. }
  1428. return err;
  1429. }
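/*
 * Illustrative sketch, not part of the original source: the three
 * outcomes documented above. insert_inline_extent_backref() below is
 * the real in-tree consumer of this contract; this only restates it.
 */
#if 0
	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0)
		/* found: *iref points at the existing inline ref */;
	else if (ret == -ENOENT)
		/* not found: *iref is where a new ref can be inserted */;
	else if (ret == -EAGAIN)
		/* item full: fall back to a separate back ref item */;
#endif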
/*
 * helper to add a new inline back ref
 */
  1433. static noinline_for_stack
  1434. void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
  1435. struct btrfs_root *root,
  1436. struct btrfs_path *path,
  1437. struct btrfs_extent_inline_ref *iref,
  1438. u64 parent, u64 root_objectid,
  1439. u64 owner, u64 offset, int refs_to_add,
  1440. struct btrfs_delayed_extent_op *extent_op)
  1441. {
  1442. struct extent_buffer *leaf;
  1443. struct btrfs_extent_item *ei;
  1444. unsigned long ptr;
  1445. unsigned long end;
  1446. unsigned long item_offset;
  1447. u64 refs;
  1448. int size;
  1449. int type;
  1450. leaf = path->nodes[0];
  1451. ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
  1452. item_offset = (unsigned long)iref - (unsigned long)ei;
  1453. type = extent_ref_type(parent, owner);
  1454. size = btrfs_extent_inline_ref_size(type);
  1455. btrfs_extend_item(trans, root, path, size);
  1456. ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
  1457. refs = btrfs_extent_refs(leaf, ei);
  1458. refs += refs_to_add;
  1459. btrfs_set_extent_refs(leaf, ei, refs);
  1460. if (extent_op)
  1461. __run_delayed_extent_op(extent_op, leaf, ei);
  1462. ptr = (unsigned long)ei + item_offset;
  1463. end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
  1464. if (ptr < end - size)
  1465. memmove_extent_buffer(leaf, ptr + size, ptr,
  1466. end - size - ptr);
  1467. iref = (struct btrfs_extent_inline_ref *)ptr;
  1468. btrfs_set_extent_inline_ref_type(leaf, iref, type);
  1469. if (type == BTRFS_EXTENT_DATA_REF_KEY) {
  1470. struct btrfs_extent_data_ref *dref;
  1471. dref = (struct btrfs_extent_data_ref *)(&iref->offset);
  1472. btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
  1473. btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
  1474. btrfs_set_extent_data_ref_offset(leaf, dref, offset);
  1475. btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
  1476. } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
  1477. struct btrfs_shared_data_ref *sref;
  1478. sref = (struct btrfs_shared_data_ref *)(iref + 1);
  1479. btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
  1480. btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
  1481. } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
  1482. btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
  1483. } else {
  1484. btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
  1485. }
  1486. btrfs_mark_buffer_dirty(leaf);
  1487. }
  1488. static int lookup_extent_backref(struct btrfs_trans_handle *trans,
  1489. struct btrfs_root *root,
  1490. struct btrfs_path *path,
  1491. struct btrfs_extent_inline_ref **ref_ret,
  1492. u64 bytenr, u64 num_bytes, u64 parent,
  1493. u64 root_objectid, u64 owner, u64 offset)
  1494. {
  1495. int ret;
  1496. ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
  1497. bytenr, num_bytes, parent,
  1498. root_objectid, owner, offset, 0);
  1499. if (ret != -ENOENT)
  1500. return ret;
  1501. btrfs_release_path(path);
  1502. *ref_ret = NULL;
  1503. if (owner < BTRFS_FIRST_FREE_OBJECTID) {
  1504. ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
  1505. root_objectid);
  1506. } else {
  1507. ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
  1508. root_objectid, owner, offset);
  1509. }
  1510. return ret;
  1511. }
/*
 * helper to update or remove an inline back ref
 */
  1515. static noinline_for_stack
  1516. void update_inline_extent_backref(struct btrfs_trans_handle *trans,
  1517. struct btrfs_root *root,
  1518. struct btrfs_path *path,
  1519. struct btrfs_extent_inline_ref *iref,
  1520. int refs_to_mod,
  1521. struct btrfs_delayed_extent_op *extent_op)
  1522. {
  1523. struct extent_buffer *leaf;
  1524. struct btrfs_extent_item *ei;
  1525. struct btrfs_extent_data_ref *dref = NULL;
  1526. struct btrfs_shared_data_ref *sref = NULL;
  1527. unsigned long ptr;
  1528. unsigned long end;
  1529. u32 item_size;
  1530. int size;
  1531. int type;
  1532. u64 refs;
  1533. leaf = path->nodes[0];
  1534. ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
  1535. refs = btrfs_extent_refs(leaf, ei);
  1536. WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
  1537. refs += refs_to_mod;
  1538. btrfs_set_extent_refs(leaf, ei, refs);
  1539. if (extent_op)
  1540. __run_delayed_extent_op(extent_op, leaf, ei);
  1541. type = btrfs_extent_inline_ref_type(leaf, iref);
  1542. if (type == BTRFS_EXTENT_DATA_REF_KEY) {
  1543. dref = (struct btrfs_extent_data_ref *)(&iref->offset);
  1544. refs = btrfs_extent_data_ref_count(leaf, dref);
  1545. } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
  1546. sref = (struct btrfs_shared_data_ref *)(iref + 1);
  1547. refs = btrfs_shared_data_ref_count(leaf, sref);
  1548. } else {
  1549. refs = 1;
  1550. BUG_ON(refs_to_mod != -1);
  1551. }
  1552. BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
  1553. refs += refs_to_mod;
  1554. if (refs > 0) {
  1555. if (type == BTRFS_EXTENT_DATA_REF_KEY)
  1556. btrfs_set_extent_data_ref_count(leaf, dref, refs);
  1557. else
  1558. btrfs_set_shared_data_ref_count(leaf, sref, refs);
  1559. } else {
  1560. size = btrfs_extent_inline_ref_size(type);
  1561. item_size = btrfs_item_size_nr(leaf, path->slots[0]);
  1562. ptr = (unsigned long)iref;
  1563. end = (unsigned long)ei + item_size;
  1564. if (ptr + size < end)
  1565. memmove_extent_buffer(leaf, ptr, ptr + size,
  1566. end - ptr - size);
  1567. item_size -= size;
  1568. btrfs_truncate_item(trans, root, path, item_size, 1);
  1569. }
  1570. btrfs_mark_buffer_dirty(leaf);
  1571. }
  1572. static noinline_for_stack
  1573. int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
  1574. struct btrfs_root *root,
  1575. struct btrfs_path *path,
  1576. u64 bytenr, u64 num_bytes, u64 parent,
  1577. u64 root_objectid, u64 owner,
  1578. u64 offset, int refs_to_add,
  1579. struct btrfs_delayed_extent_op *extent_op)
  1580. {
  1581. struct btrfs_extent_inline_ref *iref;
  1582. int ret;
  1583. ret = lookup_inline_extent_backref(trans, root, path, &iref,
  1584. bytenr, num_bytes, parent,
  1585. root_objectid, owner, offset, 1);
  1586. if (ret == 0) {
  1587. BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
  1588. update_inline_extent_backref(trans, root, path, iref,
  1589. refs_to_add, extent_op);
  1590. } else if (ret == -ENOENT) {
  1591. setup_inline_extent_backref(trans, root, path, iref, parent,
  1592. root_objectid, owner, offset,
  1593. refs_to_add, extent_op);
  1594. ret = 0;
  1595. }
  1596. return ret;
  1597. }
  1598. static int insert_extent_backref(struct btrfs_trans_handle *trans,
  1599. struct btrfs_root *root,
  1600. struct btrfs_path *path,
  1601. u64 bytenr, u64 parent, u64 root_objectid,
  1602. u64 owner, u64 offset, int refs_to_add)
  1603. {
  1604. int ret;
  1605. if (owner < BTRFS_FIRST_FREE_OBJECTID) {
  1606. BUG_ON(refs_to_add != 1);
  1607. ret = insert_tree_block_ref(trans, root, path, bytenr,
  1608. parent, root_objectid);
  1609. } else {
  1610. ret = insert_extent_data_ref(trans, root, path, bytenr,
  1611. parent, root_objectid,
  1612. owner, offset, refs_to_add);
  1613. }
  1614. return ret;
  1615. }
  1616. static int remove_extent_backref(struct btrfs_trans_handle *trans,
  1617. struct btrfs_root *root,
  1618. struct btrfs_path *path,
  1619. struct btrfs_extent_inline_ref *iref,
  1620. int refs_to_drop, int is_data)
  1621. {
  1622. int ret = 0;
  1623. BUG_ON(!is_data && refs_to_drop != 1);
  1624. if (iref) {
  1625. update_inline_extent_backref(trans, root, path, iref,
  1626. -refs_to_drop, NULL);
  1627. } else if (is_data) {
  1628. ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
  1629. } else {
  1630. ret = btrfs_del_item(trans, root, path);
  1631. }
  1632. return ret;
  1633. }
  1634. static int btrfs_issue_discard(struct block_device *bdev,
  1635. u64 start, u64 len)
  1636. {
  1637. return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
  1638. }
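/*
 * Added commentary: blkdev_issue_discard() takes 512-byte sectors, so
 * byte offsets and lengths are shifted right by 9 above. For example,
 * a 1 MiB discard at byte offset 4 GiB becomes sector 8388608
 * (4 * 2^30 / 512) with a count of 2048 sectors (2^20 / 512).
 */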
  1639. static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
  1640. u64 num_bytes, u64 *actual_bytes)
  1641. {
  1642. int ret;
  1643. u64 discarded_bytes = 0;
  1644. struct btrfs_bio *bbio = NULL;
  1645. /* Tell the block device(s) that the sectors can be discarded */
  1646. ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
  1647. bytenr, &num_bytes, &bbio, 0);
  1648. /* Error condition is -ENOMEM */
  1649. if (!ret) {
  1650. struct btrfs_bio_stripe *stripe = bbio->stripes;
  1651. int i;
  1652. for (i = 0; i < bbio->num_stripes; i++, stripe++) {
  1653. if (!stripe->dev->can_discard)
  1654. continue;
  1655. ret = btrfs_issue_discard(stripe->dev->bdev,
  1656. stripe->physical,
  1657. stripe->length);
  1658. if (!ret)
  1659. discarded_bytes += stripe->length;
  1660. else if (ret != -EOPNOTSUPP)
  1661. break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
  1662. /*
  1663. * Just in case we get back EOPNOTSUPP for some reason,
  1664. * just ignore the return value so we don't screw up
  1665. * people calling discard_extent.
  1666. */
  1667. ret = 0;
  1668. }
  1669. kfree(bbio);
  1670. }
  1671. if (actual_bytes)
  1672. *actual_bytes = discarded_bytes;
  1673. if (ret == -EOPNOTSUPP)
  1674. ret = 0;
  1675. return ret;
  1676. }
  1677. /* Can return -ENOMEM */
  1678. int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
  1679. struct btrfs_root *root,
  1680. u64 bytenr, u64 num_bytes, u64 parent,
  1681. u64 root_objectid, u64 owner, u64 offset, int for_cow)
  1682. {
  1683. int ret;
  1684. struct btrfs_fs_info *fs_info = root->fs_info;
  1685. BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
  1686. root_objectid == BTRFS_TREE_LOG_OBJECTID);
  1687. if (owner < BTRFS_FIRST_FREE_OBJECTID) {
  1688. ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
  1689. num_bytes,
  1690. parent, root_objectid, (int)owner,
  1691. BTRFS_ADD_DELAYED_REF, NULL, for_cow);
  1692. } else {
  1693. ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
  1694. num_bytes,
  1695. parent, root_objectid, owner, offset,
  1696. BTRFS_ADD_DELAYED_REF, NULL, for_cow);
  1697. }
  1698. return ret;
  1699. }
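/*
 * Illustrative sketch, not part of the original source: queueing an
 * extra implicit reference on a data extent on behalf of a file, e.g.
 * when a file extent item is copied. The variable names are
 * hypothetical; the argument order matches the function above.
 */
#if 0
	ret = btrfs_inc_extent_ref(trans, root,
				   extent_bytenr, extent_num_bytes,
				   0,			/* parent: implicit ref */
				   root->root_key.objectid,
				   inode_objectid, file_offset,
				   0);			/* for_cow */
	if (ret)
		return ret;	/* -ENOMEM */
#endif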
  1700. static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
  1701. struct btrfs_root *root,
  1702. u64 bytenr, u64 num_bytes,
  1703. u64 parent, u64 root_objectid,
  1704. u64 owner, u64 offset, int refs_to_add,
  1705. struct btrfs_delayed_extent_op *extent_op)
  1706. {
  1707. struct btrfs_path *path;
  1708. struct extent_buffer *leaf;
  1709. struct btrfs_extent_item *item;
  1710. u64 refs;
  1711. int ret;
  1712. int err = 0;
  1713. path = btrfs_alloc_path();
  1714. if (!path)
  1715. return -ENOMEM;
  1716. path->reada = 1;
  1717. path->leave_spinning = 1;
/* this will set up the path even if it fails to insert the back ref */
  1719. ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
  1720. path, bytenr, num_bytes, parent,
  1721. root_objectid, owner, offset,
  1722. refs_to_add, extent_op);
  1723. if (ret == 0)
  1724. goto out;
  1725. if (ret != -EAGAIN) {
  1726. err = ret;
  1727. goto out;
  1728. }
  1729. leaf = path->nodes[0];
  1730. item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
  1731. refs = btrfs_extent_refs(leaf, item);
  1732. btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
  1733. if (extent_op)
  1734. __run_delayed_extent_op(extent_op, leaf, item);
  1735. btrfs_mark_buffer_dirty(leaf);
  1736. btrfs_release_path(path);
  1737. path->reada = 1;
  1738. path->leave_spinning = 1;
  1739. /* now insert the actual backref */
  1740. ret = insert_extent_backref(trans, root->fs_info->extent_root,
  1741. path, bytenr, parent, root_objectid,
  1742. owner, offset, refs_to_add);
  1743. if (ret)
  1744. btrfs_abort_transaction(trans, root, ret);
  1745. out:
  1746. btrfs_free_path(path);
  1747. return err;
  1748. }
  1749. static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
  1750. struct btrfs_root *root,
  1751. struct btrfs_delayed_ref_node *node,
  1752. struct btrfs_delayed_extent_op *extent_op,
  1753. int insert_reserved)
  1754. {
  1755. int ret = 0;
  1756. struct btrfs_delayed_data_ref *ref;
  1757. struct btrfs_key ins;
  1758. u64 parent = 0;
  1759. u64 ref_root = 0;
  1760. u64 flags = 0;
  1761. ins.objectid = node->bytenr;
  1762. ins.offset = node->num_bytes;
  1763. ins.type = BTRFS_EXTENT_ITEM_KEY;
  1764. ref = btrfs_delayed_node_to_data_ref(node);
  1765. if (node->type == BTRFS_SHARED_DATA_REF_KEY)
  1766. parent = ref->parent;
  1767. else
  1768. ref_root = ref->root;
  1769. if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
  1770. if (extent_op) {
  1771. BUG_ON(extent_op->update_key);
  1772. flags |= extent_op->flags_to_set;
  1773. }
  1774. ret = alloc_reserved_file_extent(trans, root,
  1775. parent, ref_root, flags,
  1776. ref->objectid, ref->offset,
  1777. &ins, node->ref_mod);
  1778. } else if (node->action == BTRFS_ADD_DELAYED_REF) {
  1779. ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
  1780. node->num_bytes, parent,
  1781. ref_root, ref->objectid,
  1782. ref->offset, node->ref_mod,
  1783. extent_op);
  1784. } else if (node->action == BTRFS_DROP_DELAYED_REF) {
  1785. ret = __btrfs_free_extent(trans, root, node->bytenr,
  1786. node->num_bytes, parent,
  1787. ref_root, ref->objectid,
  1788. ref->offset, node->ref_mod,
  1789. extent_op);
  1790. } else {
  1791. BUG();
  1792. }
  1793. return ret;
  1794. }
  1795. static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
  1796. struct extent_buffer *leaf,
  1797. struct btrfs_extent_item *ei)
  1798. {
  1799. u64 flags = btrfs_extent_flags(leaf, ei);
  1800. if (extent_op->update_flags) {
  1801. flags |= extent_op->flags_to_set;
  1802. btrfs_set_extent_flags(leaf, ei, flags);
  1803. }
  1804. if (extent_op->update_key) {
  1805. struct btrfs_tree_block_info *bi;
  1806. BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
  1807. bi = (struct btrfs_tree_block_info *)(ei + 1);
  1808. btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
  1809. }
  1810. }
  1811. static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
  1812. struct btrfs_root *root,
  1813. struct btrfs_delayed_ref_node *node,
  1814. struct btrfs_delayed_extent_op *extent_op)
  1815. {
  1816. struct btrfs_key key;
  1817. struct btrfs_path *path;
  1818. struct btrfs_extent_item *ei;
  1819. struct extent_buffer *leaf;
  1820. u32 item_size;
  1821. int ret;
  1822. int err = 0;
  1823. if (trans->aborted)
  1824. return 0;
  1825. path = btrfs_alloc_path();
  1826. if (!path)
  1827. return -ENOMEM;
  1828. key.objectid = node->bytenr;
  1829. key.type = BTRFS_EXTENT_ITEM_KEY;
  1830. key.offset = node->num_bytes;
  1831. path->reada = 1;
  1832. path->leave_spinning = 1;
  1833. ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
  1834. path, 0, 1);
  1835. if (ret < 0) {
  1836. err = ret;
  1837. goto out;
  1838. }
  1839. if (ret > 0) {
  1840. err = -EIO;
  1841. goto out;
  1842. }
  1843. leaf = path->nodes[0];
  1844. item_size = btrfs_item_size_nr(leaf, path->slots[0]);
  1845. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  1846. if (item_size < sizeof(*ei)) {
  1847. ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
  1848. path, (u64)-1, 0);
  1849. if (ret < 0) {
  1850. err = ret;
  1851. goto out;
  1852. }
  1853. leaf = path->nodes[0];
  1854. item_size = btrfs_item_size_nr(leaf, path->slots[0]);
  1855. }
  1856. #endif
  1857. BUG_ON(item_size < sizeof(*ei));
  1858. ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
  1859. __run_delayed_extent_op(extent_op, leaf, ei);
  1860. btrfs_mark_buffer_dirty(leaf);
  1861. out:
  1862. btrfs_free_path(path);
  1863. return err;
  1864. }
  1865. static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
  1866. struct btrfs_root *root,
  1867. struct btrfs_delayed_ref_node *node,
  1868. struct btrfs_delayed_extent_op *extent_op,
  1869. int insert_reserved)
  1870. {
  1871. int ret = 0;
  1872. struct btrfs_delayed_tree_ref *ref;
  1873. struct btrfs_key ins;
  1874. u64 parent = 0;
  1875. u64 ref_root = 0;
  1876. ins.objectid = node->bytenr;
  1877. ins.offset = node->num_bytes;
  1878. ins.type = BTRFS_EXTENT_ITEM_KEY;
  1879. ref = btrfs_delayed_node_to_tree_ref(node);
  1880. if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
  1881. parent = ref->parent;
  1882. else
  1883. ref_root = ref->root;
  1884. BUG_ON(node->ref_mod != 1);
  1885. if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
  1886. BUG_ON(!extent_op || !extent_op->update_flags ||
  1887. !extent_op->update_key);
  1888. ret = alloc_reserved_tree_block(trans, root,
  1889. parent, ref_root,
  1890. extent_op->flags_to_set,
  1891. &extent_op->key,
  1892. ref->level, &ins);
  1893. } else if (node->action == BTRFS_ADD_DELAYED_REF) {
  1894. ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
  1895. node->num_bytes, parent, ref_root,
  1896. ref->level, 0, 1, extent_op);
  1897. } else if (node->action == BTRFS_DROP_DELAYED_REF) {
  1898. ret = __btrfs_free_extent(trans, root, node->bytenr,
  1899. node->num_bytes, parent, ref_root,
  1900. ref->level, 0, 1, extent_op);
  1901. } else {
  1902. BUG();
  1903. }
  1904. return ret;
  1905. }
  1906. /* helper function to actually process a single delayed ref entry */
  1907. static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
  1908. struct btrfs_root *root,
  1909. struct btrfs_delayed_ref_node *node,
  1910. struct btrfs_delayed_extent_op *extent_op,
  1911. int insert_reserved)
  1912. {
  1913. int ret = 0;
  1914. if (trans->aborted)
  1915. return 0;
  1916. if (btrfs_delayed_ref_is_head(node)) {
  1917. struct btrfs_delayed_ref_head *head;
  1918. /*
  1919. * we've hit the end of the chain and we were supposed
  1920. * to insert this extent into the tree. But, it got
  1921. * deleted before we ever needed to insert it, so all
  1922. * we have to do is clean up the accounting
  1923. */
  1924. BUG_ON(extent_op);
  1925. head = btrfs_delayed_node_to_head(node);
  1926. if (insert_reserved) {
  1927. btrfs_pin_extent(root, node->bytenr,
  1928. node->num_bytes, 1);
  1929. if (head->is_data) {
  1930. ret = btrfs_del_csums(trans, root,
  1931. node->bytenr,
  1932. node->num_bytes);
  1933. }
  1934. }
  1935. return ret;
  1936. }
  1937. if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
  1938. node->type == BTRFS_SHARED_BLOCK_REF_KEY)
  1939. ret = run_delayed_tree_ref(trans, root, node, extent_op,
  1940. insert_reserved);
  1941. else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
  1942. node->type == BTRFS_SHARED_DATA_REF_KEY)
  1943. ret = run_delayed_data_ref(trans, root, node, extent_op,
  1944. insert_reserved);
  1945. else
  1946. BUG();
  1947. return ret;
  1948. }
  1949. static noinline struct btrfs_delayed_ref_node *
  1950. select_delayed_ref(struct btrfs_delayed_ref_head *head)
  1951. {
  1952. struct rb_node *node;
  1953. struct btrfs_delayed_ref_node *ref;
  1954. int action = BTRFS_ADD_DELAYED_REF;
  1955. again:
/*
 * select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
 * this prevents the ref count from going down to zero while
 * there are still pending delayed refs.
 */
  1961. node = rb_prev(&head->node.rb_node);
  1962. while (1) {
  1963. if (!node)
  1964. break;
  1965. ref = rb_entry(node, struct btrfs_delayed_ref_node,
  1966. rb_node);
  1967. if (ref->bytenr != head->node.bytenr)
  1968. break;
  1969. if (ref->action == action)
  1970. return ref;
  1971. node = rb_prev(node);
  1972. }
  1973. if (action == BTRFS_ADD_DELAYED_REF) {
  1974. action = BTRFS_DROP_DELAYED_REF;
  1975. goto again;
  1976. }
  1977. return NULL;
  1978. }
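/*
 * Added commentary with a worked example: if a head has a pending
 * +1 (ADD) and a pending -1 (DROP) for the same extent, running the
 * DROP first could transiently drive the count to zero and free the
 * extent even though the ADD would bring it right back. Returning the
 * ADD_DELAYED_REF entries first makes the count go 1 -> 2 -> 1
 * instead of 1 -> 0 -> 1.
 */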
/*
 * Returns the number of refs processed on success (including when
 * called with an already aborted transaction). Returns a negative
 * errno such as -ENOMEM or -EIO on failure, in which case the caller
 * aborts the transaction.
 */
  1983. static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
  1984. struct btrfs_root *root,
  1985. struct list_head *cluster)
  1986. {
  1987. struct btrfs_delayed_ref_root *delayed_refs;
  1988. struct btrfs_delayed_ref_node *ref;
  1989. struct btrfs_delayed_ref_head *locked_ref = NULL;
  1990. struct btrfs_delayed_extent_op *extent_op;
  1991. struct btrfs_fs_info *fs_info = root->fs_info;
  1992. int ret;
  1993. int count = 0;
  1994. int must_insert_reserved = 0;
  1995. delayed_refs = &trans->transaction->delayed_refs;
  1996. while (1) {
  1997. if (!locked_ref) {
  1998. /* pick a new head ref from the cluster list */
  1999. if (list_empty(cluster))
  2000. break;
  2001. locked_ref = list_entry(cluster->next,
  2002. struct btrfs_delayed_ref_head, cluster);
  2003. /* grab the lock that says we are going to process
  2004. * all the refs for this head */
  2005. ret = btrfs_delayed_ref_lock(trans, locked_ref);
  2006. /*
  2007. * we may have dropped the spin lock to get the head
  2008. * mutex lock, and that might have given someone else
  2009. * time to free the head. If that's true, it has been
  2010. * removed from our list and we can move on.
  2011. */
  2012. if (ret == -EAGAIN) {
  2013. locked_ref = NULL;
  2014. count++;
  2015. continue;
  2016. }
  2017. }
  2018. /*
  2019. * We need to try and merge add/drops of the same ref since we
  2020. * can run into issues with relocate dropping the implicit ref
  2021. * and then it being added back again before the drop can
  2022. * finish. If we merged anything we need to re-loop so we can
  2023. * get a good ref.
  2024. */
  2025. btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
  2026. locked_ref);
  2027. /*
  2028. * locked_ref is the head node, so we have to go one
  2029. * node back for any delayed ref updates
  2030. */
  2031. ref = select_delayed_ref(locked_ref);
  2032. if (ref && ref->seq &&
  2033. btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
  2034. /*
  2035. * there are still refs with lower seq numbers in the
  2036. * process of being added. Don't run this ref yet.
  2037. */
  2038. list_del_init(&locked_ref->cluster);
  2039. btrfs_delayed_ref_unlock(locked_ref);
  2040. locked_ref = NULL;
  2041. delayed_refs->num_heads_ready++;
  2042. spin_unlock(&delayed_refs->lock);
  2043. cond_resched();
  2044. spin_lock(&delayed_refs->lock);
  2045. continue;
  2046. }
  2047. /*
  2048. * record the must insert reserved flag before we
  2049. * drop the spin lock.
  2050. */
  2051. must_insert_reserved = locked_ref->must_insert_reserved;
  2052. locked_ref->must_insert_reserved = 0;
  2053. extent_op = locked_ref->extent_op;
  2054. locked_ref->extent_op = NULL;
  2055. if (!ref) {
/* All delayed refs have been processed; go ahead
 * and send the head node to run_one_delayed_ref,
 * so that any accounting fixes can happen
 */
  2060. ref = &locked_ref->node;
  2061. if (extent_op && must_insert_reserved) {
  2062. btrfs_free_delayed_extent_op(extent_op);
  2063. extent_op = NULL;
  2064. }
  2065. if (extent_op) {
  2066. spin_unlock(&delayed_refs->lock);
  2067. ret = run_delayed_extent_op(trans, root,
  2068. ref, extent_op);
  2069. btrfs_free_delayed_extent_op(extent_op);
  2070. if (ret) {
  2071. printk(KERN_DEBUG
  2072. "btrfs: run_delayed_extent_op "
  2073. "returned %d\n", ret);
  2074. spin_lock(&delayed_refs->lock);
  2075. btrfs_delayed_ref_unlock(locked_ref);
  2076. return ret;
  2077. }
  2078. goto next;
  2079. }
  2080. }
  2081. ref->in_tree = 0;
  2082. rb_erase(&ref->rb_node, &delayed_refs->root);
  2083. delayed_refs->num_entries--;
  2084. if (!btrfs_delayed_ref_is_head(ref)) {
  2085. /*
  2086. * when we play the delayed ref, also correct the
  2087. * ref_mod on head
  2088. */
  2089. switch (ref->action) {
  2090. case BTRFS_ADD_DELAYED_REF:
  2091. case BTRFS_ADD_DELAYED_EXTENT:
  2092. locked_ref->node.ref_mod -= ref->ref_mod;
  2093. break;
  2094. case BTRFS_DROP_DELAYED_REF:
  2095. locked_ref->node.ref_mod += ref->ref_mod;
  2096. break;
  2097. default:
  2098. WARN_ON(1);
  2099. }
  2100. }
  2101. spin_unlock(&delayed_refs->lock);
  2102. ret = run_one_delayed_ref(trans, root, ref, extent_op,
  2103. must_insert_reserved);
  2104. btrfs_free_delayed_extent_op(extent_op);
  2105. if (ret) {
  2106. btrfs_delayed_ref_unlock(locked_ref);
  2107. btrfs_put_delayed_ref(ref);
  2108. printk(KERN_DEBUG
  2109. "btrfs: run_one_delayed_ref returned %d\n", ret);
  2110. spin_lock(&delayed_refs->lock);
  2111. return ret;
  2112. }
  2113. /*
  2114. * If this node is a head, that means all the refs in this head
  2115. * have been dealt with, and we will pick the next head to deal
  2116. * with, so we must unlock the head and drop it from the cluster
  2117. * list before we release it.
  2118. */
  2119. if (btrfs_delayed_ref_is_head(ref)) {
  2120. list_del_init(&locked_ref->cluster);
  2121. btrfs_delayed_ref_unlock(locked_ref);
  2122. locked_ref = NULL;
  2123. }
  2124. btrfs_put_delayed_ref(ref);
  2125. count++;
  2126. next:
  2127. cond_resched();
  2128. spin_lock(&delayed_refs->lock);
  2129. }
  2130. return count;
  2131. }
  2132. #ifdef SCRAMBLE_DELAYED_REFS
/*
 * Normally delayed refs get processed in ascending bytenr order. This
 * usually matches the order in which they were added. To expose
 * dependencies on this order, we start processing the tree in the
 * middle instead of at the beginning.
 */
  2138. static u64 find_middle(struct rb_root *root)
  2139. {
  2140. struct rb_node *n = root->rb_node;
  2141. struct btrfs_delayed_ref_node *entry;
  2142. int alt = 1;
  2143. u64 middle;
  2144. u64 first = 0, last = 0;
  2145. n = rb_first(root);
  2146. if (n) {
  2147. entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
  2148. first = entry->bytenr;
  2149. }
  2150. n = rb_last(root);
  2151. if (n) {
  2152. entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
  2153. last = entry->bytenr;
  2154. }
  2155. n = root->rb_node;
  2156. while (n) {
  2157. entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
  2158. WARN_ON(!entry->in_tree);
  2159. middle = entry->bytenr;
  2160. if (alt)
  2161. n = n->rb_left;
  2162. else
  2163. n = n->rb_right;
  2164. alt = 1 - alt;
  2165. }
  2166. return middle;
  2167. }
  2168. #endif
  2169. int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
  2170. struct btrfs_fs_info *fs_info)
  2171. {
  2172. struct qgroup_update *qgroup_update;
  2173. int ret = 0;
  2174. if (list_empty(&trans->qgroup_ref_list) !=
  2175. !trans->delayed_ref_elem.seq) {
  2176. /* list without seq or seq without list */
  2177. printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
  2178. list_empty(&trans->qgroup_ref_list) ? "" : " not",
  2179. trans->delayed_ref_elem.seq);
  2180. BUG();
  2181. }
  2182. if (!trans->delayed_ref_elem.seq)
  2183. return 0;
  2184. while (!list_empty(&trans->qgroup_ref_list)) {
  2185. qgroup_update = list_first_entry(&trans->qgroup_ref_list,
  2186. struct qgroup_update, list);
  2187. list_del(&qgroup_update->list);
  2188. if (!ret)
  2189. ret = btrfs_qgroup_account_ref(
  2190. trans, fs_info, qgroup_update->node,
  2191. qgroup_update->extent_op);
  2192. kfree(qgroup_update);
  2193. }
  2194. btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
  2195. return ret;
  2196. }
  2197. static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
  2198. int count)
  2199. {
  2200. int val = atomic_read(&delayed_refs->ref_seq);
  2201. if (val < seq || val >= seq + count)
  2202. return 1;
  2203. return 0;
  2204. }
  2205. /*
  2206. * this starts processing the delayed reference count updates and
  2207. * extent insertions we have queued up so far. count can be
  2208. * 0, which means to process everything in the tree at the start
  2209. * of the run (but not newly added entries), or it can be some target
  2210. * number you'd like to process.
  2211. *
  2212. * Returns 0 on success or if called with an aborted transaction
  2213. * Returns <0 on error and aborts the transaction
  2214. */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, unsigned long count)
{
        struct rb_node *node;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_node *ref;
        struct list_head cluster;
        int ret;
        u64 delayed_start;
        int run_all = count == (unsigned long)-1;
        int run_most = 0;
        int loops;

        /* We'll clean this up in btrfs_cleanup_transaction */
        if (trans->aborted)
                return 0;

        if (root == root->fs_info->extent_root)
                root = root->fs_info->tree_root;

        btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);

        delayed_refs = &trans->transaction->delayed_refs;
        INIT_LIST_HEAD(&cluster);
        if (count == 0) {
                count = delayed_refs->num_entries * 2;
                run_most = 1;
        }

        if (!run_all && !run_most) {
                int old;
                int seq = atomic_read(&delayed_refs->ref_seq);

progress:
                old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
                if (old) {
                        DEFINE_WAIT(__wait);
                        if (delayed_refs->num_entries < 16348)
                                return 0;

                        prepare_to_wait(&delayed_refs->wait, &__wait,
                                        TASK_UNINTERRUPTIBLE);

                        old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
                        if (old) {
                                schedule();
                                finish_wait(&delayed_refs->wait, &__wait);

                                if (!refs_newer(delayed_refs, seq, 256))
                                        goto progress;
                                else
                                        return 0;
                        } else {
                                finish_wait(&delayed_refs->wait, &__wait);
                                goto again;
                        }
                }
        } else {
                atomic_inc(&delayed_refs->procs_running_refs);
        }

again:
        loops = 0;
        spin_lock(&delayed_refs->lock);

#ifdef SCRAMBLE_DELAYED_REFS
        delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif

        while (1) {
                if (!(run_all || run_most) &&
                    delayed_refs->num_heads_ready < 64)
                        break;

                /*
                 * go find something we can process in the rbtree.  We start at
                 * the beginning of the tree, and then build a cluster
                 * of refs to process starting at the first one we are able to
                 * lock
                 */
                delayed_start = delayed_refs->run_delayed_start;
                ret = btrfs_find_ref_cluster(trans, &cluster,
                                             delayed_refs->run_delayed_start);
                if (ret)
                        break;

                ret = run_clustered_refs(trans, root, &cluster);
                if (ret < 0) {
                        btrfs_release_ref_cluster(&cluster);
                        spin_unlock(&delayed_refs->lock);
                        btrfs_abort_transaction(trans, root, ret);
                        atomic_dec(&delayed_refs->procs_running_refs);
                        return ret;
                }

                atomic_add(ret, &delayed_refs->ref_seq);

                count -= min_t(unsigned long, ret, count);

                if (count == 0)
                        break;

                if (delayed_start >= delayed_refs->run_delayed_start) {
                        if (loops == 0) {
                                /*
                                 * btrfs_find_ref_cluster looped.  let's do one
                                 * more cycle.  if we don't run any delayed ref
                                 * during that cycle (because all of them are
                                 * blocked), bail out.
                                 */
                                loops = 1;
                        } else {
                                /*
                                 * no runnable refs left, stop trying
                                 */
                                BUG_ON(run_all);
                                break;
                        }
                }
                if (ret) {
                        /* refs were run, let's reset staleness detection */
                        loops = 0;
                }
        }

        if (run_all) {
                if (!list_empty(&trans->new_bgs)) {
                        spin_unlock(&delayed_refs->lock);
                        btrfs_create_pending_block_groups(trans, root);
                        spin_lock(&delayed_refs->lock);
                }

                node = rb_first(&delayed_refs->root);
                if (!node)
                        goto out;
                count = (unsigned long)-1;

                while (node) {
                        ref = rb_entry(node, struct btrfs_delayed_ref_node,
                                       rb_node);
                        if (btrfs_delayed_ref_is_head(ref)) {
                                struct btrfs_delayed_ref_head *head;

                                head = btrfs_delayed_node_to_head(ref);
                                atomic_inc(&ref->refs);

                                spin_unlock(&delayed_refs->lock);
                                /*
                                 * Mutex was contended, block until it's
                                 * released and try again
                                 */
                                mutex_lock(&head->mutex);
                                mutex_unlock(&head->mutex);

                                btrfs_put_delayed_ref(ref);
                                cond_resched();
                                goto again;
                        }
                        node = rb_next(node);
                }
                spin_unlock(&delayed_refs->lock);
                schedule_timeout(1);
                goto again;
        }
out:
        atomic_dec(&delayed_refs->procs_running_refs);
        smp_mb();
        if (waitqueue_active(&delayed_refs->wait))
                wake_up(&delayed_refs->wait);

        spin_unlock(&delayed_refs->lock);
        assert_qgroups_uptodate(trans);
        return 0;
}

int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 flags,
                                int is_data)
{
        struct btrfs_delayed_extent_op *extent_op;
        int ret;

        extent_op = btrfs_alloc_delayed_extent_op();
        if (!extent_op)
                return -ENOMEM;

        extent_op->flags_to_set = flags;
        extent_op->update_flags = 1;
        extent_op->update_key = 0;
        extent_op->is_data = is_data ? 1 : 0;

        ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
                                          num_bytes, extent_op);
        if (ret)
                btrfs_free_delayed_extent_op(extent_op);
        return ret;
}

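/*
 * Look through the delayed refs queued against 'bytenr' for a data ref
 * belonging to anyone other than (root, objectid, offset).  Returns 0 if
 * the only pending ref is our own, 1 if another ref exists, -ENOENT if
 * there is no delayed head for the extent, and -EAGAIN if the head mutex
 * was contended and the caller should retry.
 */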
static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_path *path,
                                      u64 objectid, u64 offset, u64 bytenr)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_data_ref *data_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct rb_node *node;
        int ret = 0;

        ret = -ENOENT;
        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (!head)
                goto out;

        if (!mutex_trylock(&head->mutex)) {
                atomic_inc(&head->node.refs);
                spin_unlock(&delayed_refs->lock);

                btrfs_release_path(path);

                /*
                 * Mutex was contended, block until it's released and let
                 * caller try again
                 */
                mutex_lock(&head->mutex);
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }

        node = rb_prev(&head->node.rb_node);
        if (!node)
                goto out_unlock;

        ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

        if (ref->bytenr != bytenr)
                goto out_unlock;

        ret = 1;
        if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
                goto out_unlock;

        data_ref = btrfs_delayed_node_to_data_ref(ref);

        node = rb_prev(node);
        if (node) {
                int seq = ref->seq;

                ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
                if (ref->bytenr == bytenr && ref->seq == seq)
                        goto out_unlock;
        }

        if (data_ref->root != root->root_key.objectid ||
            data_ref->objectid != objectid || data_ref->offset != offset)
                goto out_unlock;

        ret = 0;
out_unlock:
        mutex_unlock(&head->mutex);
out:
        spin_unlock(&delayed_refs->lock);
        return ret;
}

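/*
 * Check the committed extent tree for 'bytenr'.  Returns 0 only when the
 * extent carries a single inline data ref owned by (root, objectid,
 * offset) and is newer than the root's last snapshot, 1 when it may be
 * shared, and -ENOENT when no matching extent item exists.
 */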
static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_path *path,
                                        u64 objectid, u64 offset, u64 bytenr)
{
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref;
        struct btrfs_extent_inline_ref *iref;
        struct btrfs_extent_item *ei;
        struct btrfs_key key;
        u32 item_size;
        int ret;

        key.objectid = bytenr;
        key.offset = (u64)-1;
        key.type = BTRFS_EXTENT_ITEM_KEY;

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        BUG_ON(ret == 0); /* Corruption */

        ret = -ENOENT;
        if (path->slots[0] == 0)
                goto out;

        path->slots[0]--;
        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
                goto out;

        ret = 1;
        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (item_size < sizeof(*ei)) {
                WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
                goto out;
        }
#endif
        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

        if (item_size != sizeof(*ei) +
            btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
                goto out;

        if (btrfs_extent_generation(leaf, ei) <=
            btrfs_root_last_snapshot(&root->root_item))
                goto out;

        iref = (struct btrfs_extent_inline_ref *)(ei + 1);
        if (btrfs_extent_inline_ref_type(leaf, iref) !=
            BTRFS_EXTENT_DATA_REF_KEY)
                goto out;

        ref = (struct btrfs_extent_data_ref *)(&iref->offset);
        if (btrfs_extent_refs(leaf, ei) !=
            btrfs_extent_data_ref_count(leaf, ref) ||
            btrfs_extent_data_ref_root(leaf, ref) !=
            root->root_key.objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                goto out;

        ret = 0;
out:
        return ret;
}

int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root,
                          u64 objectid, u64 offset, u64 bytenr)
{
        struct btrfs_path *path;
        int ret;
        int ret2;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOENT;

        do {
                ret = check_committed_ref(trans, root, path, objectid,
                                          offset, bytenr);
                if (ret && ret != -ENOENT)
                        goto out;

                ret2 = check_delayed_ref(trans, root, path, objectid,
                                         offset, bytenr);
        } while (ret2 == -EAGAIN);

        if (ret2 && ret2 != -ENOENT) {
                ret = ret2;
                goto out;
        }

        if (ret != -ENOENT || ret2 != -ENOENT)
                ret = 0;
out:
        btrfs_free_path(path);
        if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
                WARN_ON(ret > 0);
        return ret;
}

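/*
 * Shared worker for btrfs_inc_ref()/btrfs_dec_ref() below: walk every
 * pointer in 'buf' and add or drop one reference (according to 'inc')
 * for each extent it points to -- file extents in leaves, child blocks
 * in nodes -- so the extents a COWed or freed block references keep an
 * accurate ref count.
 */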
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct extent_buffer *buf,
                           int full_backref, int inc, int for_cow)
{
        u64 bytenr;
        u64 num_bytes;
        u64 parent;
        u64 ref_root;
        u32 nritems;
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        int i;
        int level;
        int ret = 0;
        int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
                            u64, u64, u64, u64, u64, u64, int);

        ref_root = btrfs_header_owner(buf);
        nritems = btrfs_header_nritems(buf);
        level = btrfs_header_level(buf);

        if (!root->ref_cows && level == 0)
                return 0;

        if (inc)
                process_func = btrfs_inc_extent_ref;
        else
                process_func = btrfs_free_extent;

        if (full_backref)
                parent = buf->start;
        else
                parent = 0;

        for (i = 0; i < nritems; i++) {
                if (level == 0) {
                        btrfs_item_key_to_cpu(buf, &key, i);
                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
                                continue;
                        fi = btrfs_item_ptr(buf, i,
                                            struct btrfs_file_extent_item);
                        if (btrfs_file_extent_type(buf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE)
                                continue;
                        bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
                        if (bytenr == 0)
                                continue;

                        num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
                        key.offset -= btrfs_file_extent_offset(buf, fi);
                        ret = process_func(trans, root, bytenr, num_bytes,
                                           parent, ref_root, key.objectid,
                                           key.offset, for_cow);
                        if (ret)
                                goto fail;
                } else {
                        bytenr = btrfs_node_blockptr(buf, i);
                        num_bytes = btrfs_level_size(root, level - 1);
                        ret = process_func(trans, root, bytenr, num_bytes,
                                           parent, ref_root, level - 1, 0,
                                           for_cow);
                        if (ret)
                                goto fail;
                }
        }
        return 0;
fail:
        return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct extent_buffer *buf, int full_backref, int for_cow)
{
        return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct extent_buffer *buf, int full_backref, int for_cow)
{
        return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
}

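/*
 * Write the in-memory block group item for 'cache' back to its slot in
 * the extent tree.  Any failure aborts the transaction.
 */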
static int write_one_cache_group(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_block_group_cache *cache)
{
        int ret;
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        unsigned long bi;
        struct extent_buffer *leaf;

        ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
        if (ret < 0)
                goto fail;
        BUG_ON(ret); /* Corruption */

        leaf = path->nodes[0];
        bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
        write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);
fail:
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                return ret;
        }
        return 0;
}

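/*
 * Return the block group following 'cache' in cache_node order, dropping
 * the caller's reference on 'cache' and taking one on the group that is
 * returned, or NULL when 'cache' was the last one.
 */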
static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
                 struct btrfs_block_group_cache *cache)
{
        struct rb_node *node;

        spin_lock(&root->fs_info->block_group_cache_lock);
        node = rb_next(&cache->cache_node);
        btrfs_put_block_group(cache);
        if (node) {
                cache = rb_entry(node, struct btrfs_block_group_cache,
                                 cache_node);
                btrfs_get_block_group(cache);
        } else
                cache = NULL;
        spin_unlock(&root->fs_info->block_group_cache_lock);
        return cache;
}

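/*
 * Prepare the free space cache inode for 'block_group' so the cache can
 * be written out at commit time: create or truncate the inode and
 * preallocate room for the cache, scaled to the block group size.
 */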
static int cache_save_setup(struct btrfs_block_group_cache *block_group,
                            struct btrfs_trans_handle *trans,
                            struct btrfs_path *path)
{
        struct btrfs_root *root = block_group->fs_info->tree_root;
        struct inode *inode = NULL;
        u64 alloc_hint = 0;
        int dcs = BTRFS_DC_ERROR;
        int num_pages = 0;
        int retries = 0;
        int ret = 0;

        /*
         * If this block group is smaller than 100 megs don't bother caching
         * the block group.
         */
        if (block_group->key.offset < (100 * 1024 * 1024)) {
                spin_lock(&block_group->lock);
                block_group->disk_cache_state = BTRFS_DC_WRITTEN;
                spin_unlock(&block_group->lock);
                return 0;
        }

again:
        inode = lookup_free_space_inode(root, block_group, path);
        if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
                ret = PTR_ERR(inode);
                btrfs_release_path(path);
                goto out;
        }

        if (IS_ERR(inode)) {
                BUG_ON(retries);
                retries++;

                if (block_group->ro)
                        goto out_free;

                ret = create_free_space_inode(root, trans, block_group, path);
                if (ret)
                        goto out_free;
                goto again;
        }

        /* We've already setup this transaction, go ahead and exit */
        if (block_group->cache_generation == trans->transid &&
            i_size_read(inode)) {
                dcs = BTRFS_DC_SETUP;
                goto out_put;
        }

        /*
         * We want to set the generation to 0, that way if anything goes wrong
         * from here on out we know not to trust this cache when we load up
         * next time.
         */
        BTRFS_I(inode)->generation = 0;
        ret = btrfs_update_inode(trans, root, inode);
        WARN_ON(ret);

        if (i_size_read(inode) > 0) {
                ret = btrfs_truncate_free_space_cache(root, trans, path,
                                                      inode);
                if (ret)
                        goto out_put;
        }

        spin_lock(&block_group->lock);
        if (block_group->cached != BTRFS_CACHE_FINISHED ||
            !btrfs_test_opt(root, SPACE_CACHE)) {
                /*
                 * don't bother trying to write stuff out _if_
                 * a) we're not cached,
                 * b) we're mounted with the nospace_cache option.
                 */
                dcs = BTRFS_DC_WRITTEN;
                spin_unlock(&block_group->lock);
                goto out_put;
        }
        spin_unlock(&block_group->lock);

        /*
         * Try to preallocate enough space based on how big the block group
         * is.  Keep in mind this has to include any pinned space which could
         * end up taking up quite a bit since it's not folded into the other
         * space cache.
         */
        num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
        if (!num_pages)
                num_pages = 1;

        num_pages *= 16;
        num_pages *= PAGE_CACHE_SIZE;

        ret = btrfs_check_data_free_space(inode, num_pages);
        if (ret)
                goto out_put;

        ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
                                              num_pages, num_pages,
                                              &alloc_hint);
        if (!ret)
                dcs = BTRFS_DC_SETUP;
        btrfs_free_reserved_data_space(inode, num_pages);

out_put:
        iput(inode);
out_free:
        btrfs_release_path(path);
out:
        spin_lock(&block_group->lock);
        if (!ret && dcs == BTRFS_DC_SETUP)
                block_group->cache_generation = trans->transid;
        block_group->disk_cache_state = dcs;
        spin_unlock(&block_group->lock);

        return ret;
}

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        struct btrfs_block_group_cache *cache;
        int err = 0;
        struct btrfs_path *path;
        u64 last = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                while (cache) {
                        if (cache->disk_cache_state == BTRFS_DC_CLEAR)
                                break;
                        cache = next_block_group(root, cache);
                }
                if (!cache) {
                        if (last == 0)
                                break;
                        last = 0;
                        continue;
                }
                err = cache_save_setup(cache, trans, path);
                last = cache->key.objectid + cache->key.offset;
                btrfs_put_block_group(cache);
        }

        while (1) {
                if (last == 0) {
                        err = btrfs_run_delayed_refs(trans, root,
                                                     (unsigned long)-1);
                        if (err) /* File system offline */
                                goto out;
                }

                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                while (cache) {
                        if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
                                btrfs_put_block_group(cache);
                                goto again;
                        }

                        if (cache->dirty)
                                break;
                        cache = next_block_group(root, cache);
                }
                if (!cache) {
                        if (last == 0)
                                break;
                        last = 0;
                        continue;
                }

                if (cache->disk_cache_state == BTRFS_DC_SETUP)
                        cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
                cache->dirty = 0;
                last = cache->key.objectid + cache->key.offset;

                err = write_one_cache_group(trans, root, path, cache);
                if (err) /* File system offline */
                        goto out;

                btrfs_put_block_group(cache);
        }

        while (1) {
                /*
                 * I don't think this is needed since we're just marking our
                 * preallocated extent as written, but just in case it can't
                 * hurt.
                 */
                if (last == 0) {
                        err = btrfs_run_delayed_refs(trans, root,
                                                     (unsigned long)-1);
                        if (err) /* File system offline */
                                goto out;
                }

                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                while (cache) {
                        /*
                         * Really this shouldn't happen, but it could if we
                         * couldn't write the entire preallocated extent and
                         * splitting the extent resulted in a new block.
                         */
                        if (cache->dirty) {
                                btrfs_put_block_group(cache);
                                goto again;
                        }
                        if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
                                break;
                        cache = next_block_group(root, cache);
                }
                if (!cache) {
                        if (last == 0)
                                break;
                        last = 0;
                        continue;
                }

                err = btrfs_write_out_cache(root, trans, cache, path);

                /*
                 * If we didn't have an error then the cache state is still
                 * NEED_WRITE, so we can set it to WRITTEN.
                 */
                if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
                        cache->disk_cache_state = BTRFS_DC_WRITTEN;
                last = cache->key.objectid + cache->key.offset;
                btrfs_put_block_group(cache);
        }
out:
        btrfs_free_path(path);
        return err;
}

int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
        struct btrfs_block_group_cache *block_group;
        int readonly = 0;

        block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
        if (!block_group || block_group->ro)
                readonly = 1;
        if (block_group)
                btrfs_put_block_group(block_group);
        return readonly;
}

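/*
 * Add 'total_bytes'/'bytes_used' to the space info matching 'flags',
 * allocating and initializing a new one if this is the first block group
 * of that type.  The result is returned via 'space_info'.
 */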
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
                             u64 total_bytes, u64 bytes_used,
                             struct btrfs_space_info **space_info)
{
        struct btrfs_space_info *found;
        int i;
        int factor;

        if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
                     BTRFS_BLOCK_GROUP_RAID10))
                factor = 2;
        else
                factor = 1;

        found = __find_space_info(info, flags);
        if (found) {
                spin_lock(&found->lock);
                found->total_bytes += total_bytes;
                found->disk_total += total_bytes * factor;
                found->bytes_used += bytes_used;
                found->disk_used += bytes_used * factor;
                found->full = 0;
                spin_unlock(&found->lock);
                *space_info = found;
                return 0;
        }
        found = kzalloc(sizeof(*found), GFP_NOFS);
        if (!found)
                return -ENOMEM;

        for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
                INIT_LIST_HEAD(&found->block_groups[i]);
        init_rwsem(&found->groups_sem);
        spin_lock_init(&found->lock);
        found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
        found->total_bytes = total_bytes;
        found->disk_total = total_bytes * factor;
        found->bytes_used = bytes_used;
        found->disk_used = bytes_used * factor;
        found->bytes_pinned = 0;
        found->bytes_reserved = 0;
        found->bytes_readonly = 0;
        found->bytes_may_use = 0;
        found->full = 0;
        found->force_alloc = CHUNK_ALLOC_NO_FORCE;
        found->chunk_alloc = 0;
        found->flush = 0;
        init_waitqueue_head(&found->wait);
        *space_info = found;
        list_add_rcu(&found->list, &info->space_info);
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                info->data_sinfo = found;
        return 0;
}

static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
        u64 extra_flags = chunk_to_extended(flags) &
                                BTRFS_EXTENDED_PROFILE_MASK;

        write_seqlock(&fs_info->profiles_lock);
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                fs_info->avail_data_alloc_bits |= extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_METADATA)
                fs_info->avail_metadata_alloc_bits |= extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                fs_info->avail_system_alloc_bits |= extra_flags;
        write_sequnlock(&fs_info->profiles_lock);
}

/*
 * returns target flags in extended format or 0 if restripe for this
 * chunk_type is not in progress
 *
 * should be called with either volume_mutex or balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
        struct btrfs_balance_control *bctl = fs_info->balance_ctl;
        u64 target = 0;

        if (!bctl)
                return 0;

        if (flags & BTRFS_BLOCK_GROUP_DATA &&
            bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
                target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
        } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
                   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
                target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
        } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
                   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
                target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
        }

        return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Returns reduced profile in chunk format.  If profile changing is in
 * progress (either running or paused) picks the target profile (if it's
 * already available), otherwise falls back to plain reducing.
 */
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
        /*
         * we add in the count of missing devices because we want
         * to make sure that any RAID levels on a degraded FS
         * continue to be honored.
         */
        u64 num_devices = root->fs_info->fs_devices->rw_devices +
                root->fs_info->fs_devices->missing_devices;
        u64 target;
        u64 tmp;

        /*
         * see if restripe for this chunk_type is in progress, if so
         * try to reduce to the target profile
         */
        spin_lock(&root->fs_info->balance_lock);
        target = get_restripe_target(root->fs_info, flags);
        if (target) {
                /* pick target profile only if it's already available */
                if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
                        spin_unlock(&root->fs_info->balance_lock);
                        return extended_to_chunk(target);
                }
        }
        spin_unlock(&root->fs_info->balance_lock);

        /* First, mask out the RAID levels which aren't possible */
        if (num_devices == 1)
                flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
                           BTRFS_BLOCK_GROUP_RAID5);
        if (num_devices < 3)
                flags &= ~BTRFS_BLOCK_GROUP_RAID6;
        if (num_devices < 4)
                flags &= ~BTRFS_BLOCK_GROUP_RAID10;

        tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
                       BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
                       BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
        flags &= ~tmp;

        if (tmp & BTRFS_BLOCK_GROUP_RAID6)
                tmp = BTRFS_BLOCK_GROUP_RAID6;
        else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
                tmp = BTRFS_BLOCK_GROUP_RAID5;
        else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
                tmp = BTRFS_BLOCK_GROUP_RAID10;
        else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
                tmp = BTRFS_BLOCK_GROUP_RAID1;
        else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
                tmp = BTRFS_BLOCK_GROUP_RAID0;

        return extended_to_chunk(flags | tmp);
}

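/*
 * Fold the currently available allocation bits for the given block group
 * type into 'flags' (sampled under the profiles seqlock) and reduce the
 * result to a valid chunk profile.
 */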
static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
{
        unsigned seq;

        do {
                seq = read_seqbegin(&root->fs_info->profiles_lock);

                if (flags & BTRFS_BLOCK_GROUP_DATA)
                        flags |= root->fs_info->avail_data_alloc_bits;
                else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                        flags |= root->fs_info->avail_system_alloc_bits;
                else if (flags & BTRFS_BLOCK_GROUP_METADATA)
                        flags |= root->fs_info->avail_metadata_alloc_bits;
        } while (read_seqretry(&root->fs_info->profiles_lock, seq));

        return btrfs_reduce_alloc_profile(root, flags);
}

u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
        u64 flags;
        u64 ret;

        if (data)
                flags = BTRFS_BLOCK_GROUP_DATA;
        else if (root == root->fs_info->chunk_root)
                flags = BTRFS_BLOCK_GROUP_SYSTEM;
        else
                flags = BTRFS_BLOCK_GROUP_METADATA;

        ret = get_alloc_profile(root, flags);
        return ret;
}

/*
 * This will check the space that the inode allocates from to make sure we have
 * enough space for bytes.
 */
int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
{
        struct btrfs_space_info *data_sinfo;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 used;
        int ret = 0, committed = 0, alloc_chunk = 1;

        /* make sure bytes are sectorsize aligned */
        bytes = ALIGN(bytes, root->sectorsize);

        if (root == root->fs_info->tree_root ||
            BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
                alloc_chunk = 0;
                committed = 1;
        }

        data_sinfo = fs_info->data_sinfo;
        if (!data_sinfo)
                goto alloc;

again:
        /* make sure we have enough space to handle the data first */
        spin_lock(&data_sinfo->lock);
        used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
                data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
                data_sinfo->bytes_may_use;

        if (used + bytes > data_sinfo->total_bytes) {
                struct btrfs_trans_handle *trans;

                /*
                 * if we don't have enough free bytes in this space then we
                 * need to alloc a new chunk.
                 */
                if (!data_sinfo->full && alloc_chunk) {
                        u64 alloc_target;

                        data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
                        spin_unlock(&data_sinfo->lock);
alloc:
                        alloc_target = btrfs_get_alloc_profile(root, 1);
                        trans = btrfs_join_transaction(root);
                        if (IS_ERR(trans))
                                return PTR_ERR(trans);

                        ret = do_chunk_alloc(trans, root->fs_info->extent_root,
                                             alloc_target,
                                             CHUNK_ALLOC_NO_FORCE);
                        btrfs_end_transaction(trans, root);
                        if (ret < 0) {
                                if (ret != -ENOSPC)
                                        return ret;
                                else
                                        goto commit_trans;
                        }

                        if (!data_sinfo)
                                data_sinfo = fs_info->data_sinfo;

                        goto again;
                }

                /*
                 * If we have less pinned bytes than we want to allocate then
                 * don't bother committing the transaction, it won't help us.
                 */
                if (data_sinfo->bytes_pinned < bytes)
                        committed = 1;
                spin_unlock(&data_sinfo->lock);

                /* commit the current transaction and try again */
commit_trans:
                if (!committed &&
                    !atomic_read(&root->fs_info->open_ioctl_trans)) {
                        committed = 1;
                        trans = btrfs_join_transaction(root);
                        if (IS_ERR(trans))
                                return PTR_ERR(trans);
                        ret = btrfs_commit_transaction(trans, root);
                        if (ret)
                                return ret;

                        goto again;
                }

                return -ENOSPC;
        }
        data_sinfo->bytes_may_use += bytes;
        trace_btrfs_space_reservation(root->fs_info, "space_info",
                                      data_sinfo->flags, bytes, 1);
        spin_unlock(&data_sinfo->lock);

        return 0;
}

/*
 * Called if we need to clear a data reservation for this inode.
 */
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_space_info *data_sinfo;

        /* make sure bytes are sectorsize aligned */
        bytes = ALIGN(bytes, root->sectorsize);

        data_sinfo = root->fs_info->data_sinfo;
        spin_lock(&data_sinfo->lock);
        data_sinfo->bytes_may_use -= bytes;
        trace_btrfs_space_reservation(root->fs_info, "space_info",
                                      data_sinfo->flags, bytes, 0);
        spin_unlock(&data_sinfo->lock);
}

static void force_metadata_allocation(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
                        found->force_alloc = CHUNK_ALLOC_FORCE;
        }
        rcu_read_unlock();
}

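/*
 * Decide whether a new chunk should be allocated for 'sinfo'.  Forced
 * allocations always succeed; limited mode keeps roughly 1% of the FS
 * free; otherwise allocate once the existing space is mostly (about
 * 80%) used up.
 */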
static int should_alloc_chunk(struct btrfs_root *root,
                              struct btrfs_space_info *sinfo, int force)
{
        struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
        u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
        u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
        u64 thresh;

        if (force == CHUNK_ALLOC_FORCE)
                return 1;

        /*
         * We need to take into account the global rsv because for all intents
         * and purposes it's used space.  Don't worry about locking the
         * global_rsv, it doesn't change except when the transaction commits.
         */
        if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
                num_allocated += global_rsv->size;

        /*
         * in limited mode, we want to have some free space up to
         * about 1% of the FS size.
         */
        if (force == CHUNK_ALLOC_LIMITED) {
                thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
                thresh = max_t(u64, 64 * 1024 * 1024,
                               div_factor_fine(thresh, 1));

                if (num_bytes - num_allocated < thresh)
                        return 1;
        }

        if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
                return 0;
        return 1;
}

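/*
 * Metadata reservation needed to update the chunk tree and the device
 * items touched when allocating a chunk of 'type': one item per device
 * the profile stripes across, plus one for the chunk item itself.
 */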
static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
{
        u64 num_dev;

        if (type & (BTRFS_BLOCK_GROUP_RAID10 |
                    BTRFS_BLOCK_GROUP_RAID0 |
                    BTRFS_BLOCK_GROUP_RAID5 |
                    BTRFS_BLOCK_GROUP_RAID6))
                num_dev = root->fs_info->fs_devices->rw_devices;
        else if (type & BTRFS_BLOCK_GROUP_RAID1)
                num_dev = 2;
        else
                num_dev = 1;    /* DUP or single */

        /* metadata for updating devices and chunk tree */
        return btrfs_calc_trans_metadata_size(root, num_dev + 1);
}

static void check_system_chunk(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root, u64 type)
{
        struct btrfs_space_info *info;
        u64 left;
        u64 thresh;

        info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
        spin_lock(&info->lock);
        left = info->total_bytes - info->bytes_used - info->bytes_pinned -
                info->bytes_reserved - info->bytes_readonly;
        spin_unlock(&info->lock);

        thresh = get_system_chunk_thresh(root, type);
        if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
                printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
                       left, thresh, type);
                dump_space_info(info, 0, 0);
        }

        if (left < thresh) {
                u64 flags;

                flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
                btrfs_alloc_chunk(trans, root, flags);
        }
}

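/*
 * Allocate a new chunk of 'flags' if should_alloc_chunk() agrees.  Only
 * one allocation runs at a time per space_info; racing callers wait on
 * chunk_mutex and then re-evaluate.  Returns 1 if a chunk was allocated,
 * 0 if no allocation was needed, and a negative errno (including
 * -ENOSPC) on failure.
 */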
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags, int force)
{
        struct btrfs_space_info *space_info;
        struct btrfs_fs_info *fs_info = extent_root->fs_info;
        int wait_for_alloc = 0;
        int ret = 0;

        /* Don't re-enter if we're already allocating a chunk */
        if (trans->allocating_chunk)
                return -ENOSPC;

        space_info = __find_space_info(extent_root->fs_info, flags);
        if (!space_info) {
                ret = update_space_info(extent_root->fs_info, flags,
                                        0, 0, &space_info);
                BUG_ON(ret); /* -ENOMEM */
        }
        BUG_ON(!space_info); /* Logic error */

again:
        spin_lock(&space_info->lock);
        if (force < space_info->force_alloc)
                force = space_info->force_alloc;
        if (space_info->full) {
                spin_unlock(&space_info->lock);
                return 0;
        }

        if (!should_alloc_chunk(extent_root, space_info, force)) {
                spin_unlock(&space_info->lock);
                return 0;
        } else if (space_info->chunk_alloc) {
                wait_for_alloc = 1;
        } else {
                space_info->chunk_alloc = 1;
        }

        spin_unlock(&space_info->lock);

        mutex_lock(&fs_info->chunk_mutex);

        /*
         * The chunk_mutex is held throughout the entirety of a chunk
         * allocation, so once we've acquired the chunk_mutex we know that the
         * other guy is done and we need to recheck and see if we should
         * allocate.
         */
        if (wait_for_alloc) {
                mutex_unlock(&fs_info->chunk_mutex);
                wait_for_alloc = 0;
                goto again;
        }

        trans->allocating_chunk = true;

        /*
         * If we have mixed data/metadata chunks we want to make sure we keep
         * allocating mixed chunks instead of individual chunks.
         */
        if (btrfs_mixed_space_info(space_info))
                flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

        /*
         * if we're doing a data chunk, go ahead and make sure that
         * we keep a reasonable number of metadata chunks allocated in the
         * FS as well.
         */
        if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
                fs_info->data_chunk_allocations++;
                if (!(fs_info->data_chunk_allocations %
                      fs_info->metadata_ratio))
                        force_metadata_allocation(fs_info);
        }

        /*
         * Check if we have enough space in SYSTEM chunk because we may need
         * to update devices.
         */
        check_system_chunk(trans, extent_root, flags);

        ret = btrfs_alloc_chunk(trans, extent_root, flags);
        trans->allocating_chunk = false;

        spin_lock(&space_info->lock);
        if (ret < 0 && ret != -ENOSPC)
                goto out;
        if (ret)
                space_info->full = 1;
        else
                ret = 1;

        space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
        space_info->chunk_alloc = 0;
        spin_unlock(&space_info->lock);
        mutex_unlock(&fs_info->chunk_mutex);

        return ret;
}

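/*
 * Decide whether a reservation of 'bytes' may exceed total_bytes for
 * this space_info.  Overcommit is allowed only while enough unallocated
 * chunk space remains to back it, with a cap that depends on how hard we
 * are allowed to flush.
 */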
static int can_overcommit(struct btrfs_root *root,
                          struct btrfs_space_info *space_info, u64 bytes,
                          enum btrfs_reserve_flush_enum flush)
{
        struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
        u64 profile = btrfs_get_alloc_profile(root, 0);
        u64 rsv_size = 0;
        u64 avail;
        u64 used;
        u64 to_add;

        used = space_info->bytes_used + space_info->bytes_reserved +
                space_info->bytes_pinned + space_info->bytes_readonly;

        spin_lock(&global_rsv->lock);
        rsv_size = global_rsv->size;
        spin_unlock(&global_rsv->lock);

        /*
         * We only want to allow over committing if we have lots of actual
         * space free, but if we don't have enough space to handle the global
         * reserve space then we could end up having a real enospc problem
         * when trying to allocate a chunk or some other such important
         * allocation.
         */
        rsv_size <<= 1;
        if (used + rsv_size >= space_info->total_bytes)
                return 0;

        used += space_info->bytes_may_use;

        spin_lock(&root->fs_info->free_chunk_lock);
        avail = root->fs_info->free_chunk_space;
        spin_unlock(&root->fs_info->free_chunk_lock);

        /*
         * If we have dup, raid1 or raid10 then only half of the free
         * space is actually usable.  For raid56, the space info used
         * doesn't include the parity drive, so we don't have to
         * change the math
         */
        if (profile & (BTRFS_BLOCK_GROUP_DUP |
                       BTRFS_BLOCK_GROUP_RAID1 |
                       BTRFS_BLOCK_GROUP_RAID10))
                avail >>= 1;

        to_add = space_info->total_bytes;

        /*
         * If we aren't flushing all things, let us overcommit up to
         * 1/2 of the space.  If we can flush, don't let us overcommit
         * too much, let it overcommit up to 1/8 of the space.
         */
        if (flush == BTRFS_RESERVE_FLUSH_ALL)
                to_add >>= 3;
        else
                to_add >>= 1;

        /*
         * Limit the overcommit to the amount of free space we could possibly
         * allocate for chunks.
         */
        to_add = min(avail, to_add);

        if (used + bytes < space_info->total_bytes + to_add)
                return 1;
        return 0;
}

static inline int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
                                                      unsigned long nr_pages,
                                                      enum wb_reason reason)
{
        /* the flusher is dealing with the dirty inodes now. */
        if (writeback_in_progress(sb->s_bdi))
                return 1;

        if (down_read_trylock(&sb->s_umount)) {
                writeback_inodes_sb_nr(sb, nr_pages, reason);
                up_read(&sb->s_umount);
                return 1;
        }

        return 0;
}

void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
                                  unsigned long nr_pages)
{
        struct super_block *sb = root->fs_info->sb;
        int started;

        /* If we cannot start writeback, just sync all the delalloc files. */
        started = writeback_inodes_sb_nr_if_idle_safe(sb, nr_pages,
                                                      WB_REASON_FS_FREE_SPACE);
        if (!started) {
                /*
                 * We needn't worry about the filesystem going from r/w to r/o
                 * even though we don't acquire the ->s_umount mutex, because
                 * the filesystem should guarantee that the delalloc inode
                 * list is empty once it is read-only (all dirty pages have
                 * been written to disk).
                 */
                btrfs_start_delalloc_inodes(root, 0);
                btrfs_wait_ordered_extents(root, 0);
        }
}

/*
 * shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
                            bool wait_ordered)
{
        struct btrfs_block_rsv *block_rsv;
        struct btrfs_space_info *space_info;
        struct btrfs_trans_handle *trans;
        u64 delalloc_bytes;
        u64 max_reclaim;
        long time_left;
        unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
        int loops = 0;
        enum btrfs_reserve_flush_enum flush;

        trans = (struct btrfs_trans_handle *)current->journal_info;
        block_rsv = &root->fs_info->delalloc_block_rsv;
        space_info = block_rsv->space_info;

        smp_mb();
        delalloc_bytes = percpu_counter_sum_positive(
                                        &root->fs_info->delalloc_bytes);
        if (delalloc_bytes == 0) {
                if (trans)
                        return;
                btrfs_wait_ordered_extents(root, 0);
                return;
        }

        while (delalloc_bytes && loops < 3) {
                max_reclaim = min(delalloc_bytes, to_reclaim);
                nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
                btrfs_writeback_inodes_sb_nr(root, nr_pages);
                /*
                 * We need to wait for the async pages to actually start
                 * before we do anything.
                 */
                wait_event(root->fs_info->async_submit_wait,
                           !atomic_read(&root->fs_info->async_delalloc_pages));

                if (!trans)
                        flush = BTRFS_RESERVE_FLUSH_ALL;
                else
                        flush = BTRFS_RESERVE_NO_FLUSH;
                spin_lock(&space_info->lock);
                if (can_overcommit(root, space_info, orig, flush)) {
                        spin_unlock(&space_info->lock);
                        break;
                }
                spin_unlock(&space_info->lock);

                loops++;
                if (wait_ordered && !trans) {
                        btrfs_wait_ordered_extents(root, 0);
                } else {
                        time_left = schedule_timeout_killable(1);
                        if (time_left)
                                break;
                }
                smp_mb();
                delalloc_bytes = percpu_counter_sum_positive(
                                        &root->fs_info->delalloc_bytes);
        }
}

/**
 * may_commit_transaction - possibly commit the transaction if it's OK to
 * @root - the root we're allocating for
 * @bytes - the number of bytes we want to reserve
 * @force - force the commit
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does.  Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_root *root,
                                  struct btrfs_space_info *space_info,
                                  u64 bytes, int force)
{
        struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
        struct btrfs_trans_handle *trans;

        trans = (struct btrfs_trans_handle *)current->journal_info;
        if (trans)
                return -EAGAIN;

        if (force)
                goto commit;

        /* See if there is enough pinned space to make this reservation */
        spin_lock(&space_info->lock);
        if (space_info->bytes_pinned >= bytes) {
                spin_unlock(&space_info->lock);
                goto commit;
        }
        spin_unlock(&space_info->lock);

        /*
         * See if there is some space in the delayed insertion reservation for
         * this reservation.
         */
        if (space_info != delayed_rsv->space_info)
                return -ENOSPC;

        spin_lock(&space_info->lock);
        spin_lock(&delayed_rsv->lock);
        if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
                spin_unlock(&delayed_rsv->lock);
                spin_unlock(&space_info->lock);
                return -ENOSPC;
        }
        spin_unlock(&delayed_rsv->lock);
        spin_unlock(&space_info->lock);

commit:
        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                return -ENOSPC;

        return btrfs_commit_transaction(trans, root);
}

enum flush_state {
        FLUSH_DELAYED_ITEMS_NR  = 1,
        FLUSH_DELAYED_ITEMS     = 2,
        FLUSH_DELALLOC          = 3,
        FLUSH_DELALLOC_WAIT     = 4,
        ALLOC_CHUNK             = 5,
        COMMIT_TRANS            = 6,
};

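/*
 * Run one stage of the flush state machine above, trying to free up
 * 'num_bytes' of metadata space for a pending reservation of
 * 'orig_bytes'.
 */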
static int flush_space(struct btrfs_root *root,
                       struct btrfs_space_info *space_info, u64 num_bytes,
                       u64 orig_bytes, int state)
{
        struct btrfs_trans_handle *trans;
        int nr;
        int ret = 0;

        switch (state) {
        case FLUSH_DELAYED_ITEMS_NR:
        case FLUSH_DELAYED_ITEMS:
                if (state == FLUSH_DELAYED_ITEMS_NR) {
                        u64 bytes = btrfs_calc_trans_metadata_size(root, 1);

                        nr = (int)div64_u64(num_bytes, bytes);
                        if (!nr)
                                nr = 1;
                        nr *= 2;
                } else {
                        nr = -1;
                }
                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        break;
                }
                ret = btrfs_run_delayed_items_nr(trans, root, nr);
                btrfs_end_transaction(trans, root);
                break;
        case FLUSH_DELALLOC:
        case FLUSH_DELALLOC_WAIT:
                shrink_delalloc(root, num_bytes, orig_bytes,
                                state == FLUSH_DELALLOC_WAIT);
                break;
        case ALLOC_CHUNK:
                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        break;
                }
                ret = do_chunk_alloc(trans, root->fs_info->extent_root,
                                     btrfs_get_alloc_profile(root, 0),
                                     CHUNK_ALLOC_NO_FORCE);
                btrfs_end_transaction(trans, root);
                if (ret == -ENOSPC)
                        ret = 0;
                break;
        case COMMIT_TRANS:
                ret = may_commit_transaction(root, space_info, orig_bytes, 0);
                break;
        default:
                ret = -ENOSPC;
                break;
        }

        return ret;
}

/**
 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int reserve_metadata_bytes(struct btrfs_root *root,
                                  struct btrfs_block_rsv *block_rsv,
                                  u64 orig_bytes,
                                  enum btrfs_reserve_flush_enum flush)
{
        struct btrfs_space_info *space_info = block_rsv->space_info;
        u64 used;
        u64 num_bytes = orig_bytes;
        int flush_state = FLUSH_DELAYED_ITEMS_NR;
        int ret = 0;
        bool flushing = false;

again:
        ret = 0;
        spin_lock(&space_info->lock);
        /*
         * We only want to wait if somebody other than us is flushing and we
         * are actually allowed to flush all things.
         */
        while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
               space_info->flush) {
                spin_unlock(&space_info->lock);
                /*
                 * If we have a trans handle we can't wait because the flusher
                 * may have to commit the transaction, which would mean we
                 * would deadlock since we are waiting for the flusher to
                 * finish, but hold the current transaction open.
                 */
                if (current->journal_info)
                        return -EAGAIN;
                ret = wait_event_killable(space_info->wait, !space_info->flush);
                /* Must have been killed, return */
                if (ret)
                        return -EINTR;

                spin_lock(&space_info->lock);
        }

        ret = -ENOSPC;
        used = space_info->bytes_used + space_info->bytes_reserved +
                space_info->bytes_pinned + space_info->bytes_readonly +
                space_info->bytes_may_use;

        /*
         * The idea here is that if we've not already over-reserved the space
         * then we can go ahead and save our reservation first and then start
         * flushing if we need to.  Otherwise if we've already overcommitted
         * lets start flushing stuff first and then come back and try to make
         * our reservation.
         */
        if (used <= space_info->total_bytes) {
                if (used + orig_bytes <= space_info->total_bytes) {
                        space_info->bytes_may_use += orig_bytes;
                        trace_btrfs_space_reservation(root->fs_info,
                                "space_info", space_info->flags, orig_bytes, 1);
                        ret = 0;
                } else {
                        /*
                         * Ok set num_bytes to orig_bytes since we aren't
                         * overcommitted, this way we only try and reclaim
                         * what we need.
                         */
                        num_bytes = orig_bytes;
                }
        } else {
                /*
                 * Ok we're over committed, set num_bytes to the overcommitted
                 * amount plus the amount of bytes that we need for this
                 * reservation.
                 */
                num_bytes = used - space_info->total_bytes +
                        (orig_bytes * 2);
        }

        if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
                space_info->bytes_may_use += orig_bytes;
                trace_btrfs_space_reservation(root->fs_info, "space_info",
                                              space_info->flags, orig_bytes,
                                              1);
                ret = 0;
        }

        /*
         * Couldn't make our reservation, save our place so while we're trying
         * to reclaim space we can actually use it instead of somebody else
         * stealing it from us.
         *
         * We make the other tasks wait for the flush only when we can flush
         * all things.
         */
        if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
                flushing = true;
                space_info->flush = 1;
        }

        spin_unlock(&space_info->lock);

        if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
                goto out;

        ret = flush_space(root, space_info, num_bytes, orig_bytes,
                          flush_state);
        flush_state++;

        /*
         * If we are FLUSH_LIMIT, we can not flush delalloc, or the deadlock
         * would happen.  So skip delalloc flush.
         */
        if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
            (flush_state == FLUSH_DELALLOC ||
             flush_state == FLUSH_DELALLOC_WAIT))
                flush_state = ALLOC_CHUNK;

        if (!ret)
                goto again;
        else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
                 flush_state < COMMIT_TRANS)
                goto again;
        else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
                 flush_state <= COMMIT_TRANS)
                goto again;

out:
        if (ret == -ENOSPC &&
            unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
                struct btrfs_block_rsv *global_rsv =
                        &root->fs_info->global_block_rsv;

                if (block_rsv != global_rsv &&
                    !block_rsv_use_bytes(global_rsv, orig_bytes))
                        ret = 0;
        }
        if (flushing) {
                spin_lock(&space_info->lock);
                space_info->flush = 0;
                wake_up_all(&space_info->wait);
                spin_unlock(&space_info->lock);
        }
        return ret;
}

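/*
 * Pick the block reservation that allocations for this root should be
 * charged to: the transaction rsv for COW-able roots (and the csum root
 * while adding csums), otherwise the root's own rsv, falling back to the
 * empty rsv.
 */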
static struct btrfs_block_rsv *get_block_rsv(
                                        const struct btrfs_trans_handle *trans,
                                        const struct btrfs_root *root)
{
        struct btrfs_block_rsv *block_rsv = NULL;

        if (root->ref_cows)
                block_rsv = trans->block_rsv;

        if (root == root->fs_info->csum_root && trans->adding_csums)
                block_rsv = trans->block_rsv;

        if (!block_rsv)
                block_rsv = root->block_rsv;

        if (!block_rsv)
                block_rsv = &root->fs_info->empty_block_rsv;

        return block_rsv;
}

static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes)
{
        int ret = -ENOSPC;

        spin_lock(&block_rsv->lock);
        if (block_rsv->reserved >= num_bytes) {
                block_rsv->reserved -= num_bytes;
                if (block_rsv->reserved < block_rsv->size)
                        block_rsv->full = 0;
                ret = 0;
        }
        spin_unlock(&block_rsv->lock);
        return ret;
}

static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
                                u64 num_bytes, int update_size)
{
        spin_lock(&block_rsv->lock);
        block_rsv->reserved += num_bytes;
        if (update_size)
                block_rsv->size += num_bytes;
        else if (block_rsv->reserved >= block_rsv->size)
                block_rsv->full = 1;
        spin_unlock(&block_rsv->lock);
}

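/*
 * Shrink 'block_rsv' by 'num_bytes' (or to its target size when passed
 * (u64)-1).  Any excess reserved space is first donated to 'dest' if it
 * has room, and the remainder is returned to the space_info accounting
 * as no longer "may_use".
 */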
static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
                                    struct btrfs_block_rsv *block_rsv,
                                    struct btrfs_block_rsv *dest, u64 num_bytes)
{
        struct btrfs_space_info *space_info = block_rsv->space_info;

        spin_lock(&block_rsv->lock);
        if (num_bytes == (u64)-1)
                num_bytes = block_rsv->size;
        block_rsv->size -= num_bytes;
        if (block_rsv->reserved >= block_rsv->size) {
                num_bytes = block_rsv->reserved - block_rsv->size;
                block_rsv->reserved = block_rsv->size;
                block_rsv->full = 1;
        } else {
                num_bytes = 0;
        }
        spin_unlock(&block_rsv->lock);

        if (num_bytes > 0) {
                if (dest) {
                        spin_lock(&dest->lock);
                        if (!dest->full) {
                                u64 bytes_to_add;

                                bytes_to_add = dest->size - dest->reserved;
                                bytes_to_add = min(num_bytes, bytes_to_add);
                                dest->reserved += bytes_to_add;
                                if (dest->reserved >= dest->size)
                                        dest->full = 1;
                                num_bytes -= bytes_to_add;
                        }
                        spin_unlock(&dest->lock);
                }
                if (num_bytes) {
                        spin_lock(&space_info->lock);
                        space_info->bytes_may_use -= num_bytes;
                        trace_btrfs_space_reservation(fs_info, "space_info",
                                        space_info->flags, num_bytes, 0);
                        space_info->reservation_progress++;
                        spin_unlock(&space_info->lock);
                }
        }
}

static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
                                   struct btrfs_block_rsv *dst, u64 num_bytes)
{
        int ret;

        ret = block_rsv_use_bytes(src, num_bytes);
        if (ret)
                return ret;

        block_rsv_add_bytes(dst, num_bytes, 1);
        return 0;
}

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
{
        memset(rsv, 0, sizeof(*rsv));
        spin_lock_init(&rsv->lock);
        rsv->type = type;
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
                                              unsigned short type)
{
        struct btrfs_block_rsv *block_rsv;
        struct btrfs_fs_info *fs_info = root->fs_info;

        block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
        if (!block_rsv)
                return NULL;

        btrfs_init_block_rsv(block_rsv, type);
        block_rsv->space_info = __find_space_info(fs_info,
                                                  BTRFS_BLOCK_GROUP_METADATA);
        return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_root *root,
                          struct btrfs_block_rsv *rsv)
{
        if (!rsv)
                return;
        btrfs_block_rsv_release(root, rsv, (u64)-1);
        kfree(rsv);
}

int btrfs_block_rsv_add(struct btrfs_root *root,
                        struct btrfs_block_rsv *block_rsv, u64 num_bytes,
                        enum btrfs_reserve_flush_enum flush)
{
        int ret;

        if (num_bytes == 0)
                return 0;

        ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
        if (!ret) {
                block_rsv_add_bytes(block_rsv, num_bytes, 1);
                return 0;
        }

        return ret;
}

int btrfs_block_rsv_check(struct btrfs_root *root,
                          struct btrfs_block_rsv *block_rsv, int min_factor)
{
        u64 num_bytes = 0;
        int ret = -ENOSPC;

        if (!block_rsv)
                return 0;

        spin_lock(&block_rsv->lock);
        num_bytes = div_factor(block_rsv->size, min_factor);
        if (block_rsv->reserved >= num_bytes)
                ret = 0;
        spin_unlock(&block_rsv->lock);

        return ret;
}

int btrfs_block_rsv_refill(struct btrfs_root *root,
                           struct btrfs_block_rsv *block_rsv, u64 min_reserved,
                           enum btrfs_reserve_flush_enum flush)
{
        u64 num_bytes = 0;
        int ret = -ENOSPC;

        if (!block_rsv)
                return 0;

        spin_lock(&block_rsv->lock);
        num_bytes = min_reserved;
        if (block_rsv->reserved >= num_bytes)
                ret = 0;
        else
                num_bytes -= block_rsv->reserved;
        spin_unlock(&block_rsv->lock);

        if (!ret)
                return 0;

        ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
        if (!ret) {
                block_rsv_add_bytes(block_rsv, num_bytes, 0);
                return 0;
        }

        return ret;
}

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
                            struct btrfs_block_rsv *dst_rsv,
                            u64 num_bytes)
{
        return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_block_rsv_release(struct btrfs_root *root,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;

	if (global_rsv->full || global_rsv == block_rsv ||
	    block_rsv->space_info != global_rsv->space_info)
		global_rsv = NULL;
	block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
				num_bytes);
}

/*
 * helper to calculate size of global block reservation.
 * the desired value is the sum of space used by extent tree,
 * checksum tree and root tree
 */
static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *sinfo;
	u64 num_bytes;
	u64 meta_used;
	u64 data_used;
	int csum_size = btrfs_super_csum_size(fs_info->super_copy);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
	spin_lock(&sinfo->lock);
	data_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	spin_lock(&sinfo->lock);
	if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
		data_used = 0;
	meta_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
		    csum_size * 2;
	num_bytes += div64_u64(data_used + meta_used, 50);

	if (num_bytes * 3 > meta_used)
		num_bytes = div64_u64(meta_used, 3);

	return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
}

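/*
 * Worked example (illustrative numbers): with 1GiB of data in use,
 * 4KiB blocks and a 4-byte csum, the checksum term above is
 * (2^30 >> 12) * 4 * 2 = 2MiB, and the div64_u64(..., 50) term adds
 * 2% of data + metadata used. The result is then capped at a third of
 * the metadata in use and rounded up to a multiple of leafsize << 10.
 */
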
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	u64 num_bytes;

	num_bytes = calc_global_metadata_size(fs_info);

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = num_bytes;

	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
		    sinfo->bytes_reserved + sinfo->bytes_readonly +
		    sinfo->bytes_may_use;

	if (sinfo->total_bytes > num_bytes) {
		num_bytes = sinfo->total_bytes - num_bytes;
		block_rsv->reserved += num_bytes;
		sinfo->bytes_may_use += num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
				      sinfo->flags, num_bytes, 1);
	}

	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		sinfo->bytes_may_use -= num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
				      sinfo->flags, num_bytes, 0);
		sinfo->reservation_progress++;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	}

	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}

static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->delalloc_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	update_global_block_rsv(fs_info);
}

static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
				(u64)-1);
	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
}

void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (!trans->block_rsv)
		return;

	if (!trans->bytes_reserved)
		return;

	trace_btrfs_space_reservation(root->fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
	trans->bytes_reserved = 0;
}

/* Can only return 0 or -ENOSPC */
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * We need to hold space in order to delete our orphan item once we've
	 * added it, so this takes the reservation so we can release it later
	 * when we are truly done with the orphan item.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 1);
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_orphan_release_metadata(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 0);
	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}

/*
 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
 * root: the root of the parent directory
 * rsv: block reservation
 * items: the number of items we need to reserve space for
 * qgroup_reserved: used to return the reserved size in qgroup
 *
 * This function reserves the space for snapshot/subvolume creation and
 * deletion. Those operations differ from the common file/directory
 * operations: they change two fs/file trees and the root tree, and the
 * number of items that the qgroup reserves differs from the free space
 * reservation, so we can not use the space reservation mechanism in
 * start_transaction().
 */
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv,
				     int items,
				     u64 *qgroup_reserved)
{
	u64 num_bytes;
	int ret;

	if (root->fs_info->quota_enabled) {
		/* One for parent inode, two for dir entries */
		num_bytes = 3 * root->leafsize;
		ret = btrfs_qgroup_reserve(root, num_bytes);
		if (ret)
			return ret;
	} else {
		num_bytes = 0;
	}

	*qgroup_reserved = num_bytes;

	num_bytes = btrfs_calc_trans_metadata_size(root, items);
	rsv->space_info = __find_space_info(root->fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
				  BTRFS_RESERVE_FLUSH_ALL);
	if (ret) {
		if (*qgroup_reserved)
			btrfs_qgroup_free(root, *qgroup_reserved);
	}

	return ret;
}

void btrfs_subvolume_release_metadata(struct btrfs_root *root,
				      struct btrfs_block_rsv *rsv,
				      u64 qgroup_reserved)
{
	btrfs_block_rsv_release(root, rsv, (u64)-1);
	if (qgroup_reserved)
		btrfs_qgroup_free(root, qgroup_reserved);
}

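/*
 * Usage sketch (illustrative, not from the original file): a snapshot
 * creation path would pair the two helpers above, keeping the qgroup
 * bytes separate so they can be freed on the error path. `rsv` and
 * `items` are assumed to come from the caller.
 *
 *	u64 qgroup_reserved;
 *	int ret;
 *
 *	ret = btrfs_subvolume_reserve_metadata(root, &rsv, items,
 *					       &qgroup_reserved);
 *	if (ret)
 *		return ret;
 *	... create the snapshot/subvolume items ...
 *	btrfs_subvolume_release_metadata(root, &rsv, qgroup_reserved);
 */
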
/**
 * drop_outstanding_extent - drop an outstanding extent
 * @inode: the inode we're dropping the extent for
 *
 * This is called when we are freeing up an outstanding extent, either called
 * after an error or after an extent is written. This will return the number of
 * reserved extents that need to be freed. This must be called with
 * BTRFS_I(inode)->lock held.
 */
static unsigned drop_outstanding_extent(struct inode *inode)
{
	unsigned drop_inode_space = 0;
	unsigned dropped_extents = 0;

	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
	BTRFS_I(inode)->outstanding_extents--;

	if (BTRFS_I(inode)->outstanding_extents == 0 &&
	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			       &BTRFS_I(inode)->runtime_flags))
		drop_inode_space = 1;

	/*
	 * If we have as many or more outstanding extents as we have
	 * reserved then we need to leave the reserved extents count alone.
	 */
	if (BTRFS_I(inode)->outstanding_extents >=
	    BTRFS_I(inode)->reserved_extents)
		return drop_inode_space;

	dropped_extents = BTRFS_I(inode)->reserved_extents -
		BTRFS_I(inode)->outstanding_extents;
	BTRFS_I(inode)->reserved_extents -= dropped_extents;
	return dropped_extents + drop_inode_space;
}

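/*
 * Example (illustrative numbers): with outstanding_extents == 3 and
 * reserved_extents == 5, one call drops outstanding to 2, trims
 * reserved down to 2 and returns 3; the caller then frees the
 * metadata backing those 3 extents.
 */
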
/**
 * calc_csum_metadata_size - return the amount of metadata space that must be
 * reserved/free'd for the given bytes.
 * @inode: the inode we're manipulating
 * @num_bytes: the number of bytes in question
 * @reserve: 1 if we are reserving space, 0 if we are freeing space
 *
 * This adjusts the number of csum_bytes in the inode and then returns the
 * correct amount of metadata that must either be reserved or freed. We
 * calculate how many checksums we can fit into one leaf and then divide the
 * number of bytes that will need to be checksummed by this value to figure out
 * how many checksums will be required. If we are adding bytes then the number
 * may go up and we will return the number of additional bytes that must be
 * reserved. If it is going down we will return the number of bytes that must
 * be freed.
 *
 * This must be called with BTRFS_I(inode)->lock held.
 */
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
				   int reserve)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 csum_size;
	int num_csums_per_leaf;
	int num_csums;
	int old_csums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
	    BTRFS_I(inode)->csum_bytes == 0)
		return 0;

	old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	if (reserve)
		BTRFS_I(inode)->csum_bytes += num_bytes;
	else
		BTRFS_I(inode)->csum_bytes -= num_bytes;
	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
	num_csums_per_leaf = (int)div64_u64(csum_size,
					    sizeof(struct btrfs_csum_item) +
					    sizeof(struct btrfs_disk_key));
	num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	num_csums = num_csums + num_csums_per_leaf - 1;
	num_csums = num_csums / num_csums_per_leaf;

	old_csums = old_csums + num_csums_per_leaf - 1;
	old_csums = old_csums / num_csums_per_leaf;

	/* No change, no need to reserve more */
	if (old_csums == num_csums)
		return 0;

	if (reserve)
		return btrfs_calc_trans_metadata_size(root,
						      num_csums - old_csums);

	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
}

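/*
 * Worked example (illustrative numbers): assume 4KiB sectors and room
 * for roughly 1000 csum entries per leaf. Growing csum_bytes from 0 to
 * 8MiB means 2048 csums, i.e. DIV_ROUND_UP(2048, 1000) = 3 leaves
 * versus 0 before, so 3 items' worth of metadata is reserved via
 * btrfs_calc_trans_metadata_size(root, 3).
 */
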
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
	u64 to_reserve = 0;
	u64 csum_bytes;
	unsigned nr_extents = 0;
	int extra_reserve = 0;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
	int ret = 0;
	bool delalloc_lock = true;
	u64 to_free = 0;
	unsigned dropped;

	/* If we are a free space inode we need to not flush since we will be in
	 * the middle of a transaction commit. We also don't need the delalloc
	 * mutex since we won't race with anybody. We need this mostly to make
	 * lockdep shut its filthy mouth.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		flush = BTRFS_RESERVE_NO_FLUSH;
		delalloc_lock = false;
	}

	if (flush != BTRFS_RESERVE_NO_FLUSH &&
	    btrfs_transaction_in_commit(root->fs_info))
		schedule_timeout(1);

	if (delalloc_lock)
		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);

	num_bytes = ALIGN(num_bytes, root->sectorsize);

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;

	if (BTRFS_I(inode)->outstanding_extents >
	    BTRFS_I(inode)->reserved_extents)
		nr_extents = BTRFS_I(inode)->outstanding_extents -
			BTRFS_I(inode)->reserved_extents;

	/*
	 * Add an item to reserve for updating the inode when we complete the
	 * delalloc io.
	 */
	if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
		      &BTRFS_I(inode)->runtime_flags)) {
		nr_extents++;
		extra_reserve = 1;
	}

	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
	csum_bytes = BTRFS_I(inode)->csum_bytes;
	spin_unlock(&BTRFS_I(inode)->lock);

	if (root->fs_info->quota_enabled) {
		ret = btrfs_qgroup_reserve(root, num_bytes +
					   nr_extents * root->leafsize);
		if (ret)
			goto out_fail;
	}

	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
	if (unlikely(ret)) {
		if (root->fs_info->quota_enabled)
			btrfs_qgroup_free(root, num_bytes +
					  nr_extents * root->leafsize);
		goto out_fail;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	if (extra_reserve) {
		set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			&BTRFS_I(inode)->runtime_flags);
		nr_extents--;
	}
	BTRFS_I(inode)->reserved_extents += nr_extents;
	spin_unlock(&BTRFS_I(inode)->lock);

	if (delalloc_lock)
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);

	if (to_reserve)
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), to_reserve, 1);
	block_rsv_add_bytes(block_rsv, to_reserve, 1);

	return 0;

out_fail:
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode);
	/*
	 * If the inode's csum_bytes is the same as the original
	 * csum_bytes then we know we haven't raced with any free()ers
	 * so we can just reduce our inode's csum bytes and carry on.
	 * Otherwise we have to do the normal free thing to account for
	 * the case that the free side didn't free up its reserve
	 * because of this outstanding reservation.
	 */
	if (BTRFS_I(inode)->csum_bytes == csum_bytes)
		calc_csum_metadata_size(inode, num_bytes, 0);
	else
		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	if (to_free) {
		btrfs_block_rsv_release(root, block_rsv, to_free);
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), to_free, 0);
	}
	if (delalloc_lock)
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
	return ret;
}

/**
 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
 * @inode: the inode to release the reservation for
 * @num_bytes: the number of bytes we're releasing
 *
 * This will release the metadata reservation for an inode. This can be called
 * once we complete IO for a given set of bytes to release their metadata
 * reservations.
 */
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 to_free = 0;
	unsigned dropped;

	num_bytes = ALIGN(num_bytes, root->sectorsize);
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode);

	if (num_bytes)
		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped > 0)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	trace_btrfs_space_reservation(root->fs_info, "delalloc",
				      btrfs_ino(inode), to_free, 0);
	if (root->fs_info->quota_enabled) {
		btrfs_qgroup_free(root, num_bytes +
					dropped * root->leafsize);
	}

	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
				to_free);
}

/**
 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
 * @inode: inode we're writing to
 * @num_bytes: the number of bytes we want to allocate
 *
 * This will do the following things
 *
 * o reserve space in the data space info for num_bytes
 * o reserve space in the metadata space info based on number of outstanding
 *   extents and how much csums will be needed
 * o add to the inode's ->delalloc_bytes
 * o add it to the fs_info's delalloc inodes list.
 *
 * This will return 0 for success and -ENOSPC if there is no space left.
 */
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, num_bytes);
	if (ret)
		return ret;

	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
	if (ret) {
		btrfs_free_reserved_data_space(inode, num_bytes);
		return ret;
	}

	return 0;
}

/**
 * btrfs_delalloc_release_space - release data and metadata space for delalloc
 * @inode: inode we're releasing space for
 * @num_bytes: the number of bytes we want to free up
 *
 * This must be matched with a call to btrfs_delalloc_reserve_space. This is
 * called in the case that we don't need the metadata AND data reservations
 * anymore, e.g. if there is an error or we insert an inline extent.
 *
 * This function will release the metadata space that was not used and will
 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
 * list if there are no delalloc bytes left.
 */
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
{
	btrfs_delalloc_release_metadata(inode, num_bytes);
	btrfs_free_reserved_data_space(inode, num_bytes);
}

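/*
 * Usage sketch (illustrative, not from the original file): a buffered
 * write path pairs the reserve/release helpers above around the copy.
 * `inode`, `count` and write_some_bytes() are assumed placeholders.
 *
 *	ret = btrfs_delalloc_reserve_space(inode, count);
 *	if (ret)
 *		return ret;
 *	ret = write_some_bytes(inode, count);	// hypothetical helper
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, count);
 */
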
static int update_block_group(struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;

	/* block accounting for super block */
	spin_lock(&info->delalloc_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -ENOENT;
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space. This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
			cache_block_group(cache, 1);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_test_opt(root, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		cache->dirty = 1;
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}
		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}

static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	struct btrfs_block_group_cache *cache;
	u64 bytenr;

	spin_lock(&root->fs_info->block_group_cache_lock);
	bytenr = root->fs_info->first_logical_byte;
	spin_unlock(&root->fs_info->block_group_cache_lock);

	if (bytenr < (u64)-1)
		return bytenr;

	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->key.objectid;
	btrfs_put_block_group(cache);
	return bytenr;
}

static int pin_down_extent(struct btrfs_root *root,
			   struct btrfs_block_group_cache *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	BUG_ON(!cache); /* Logic error */

	pin_down_extent(root, cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
				    u64 bytenr, u64 num_bytes)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	BUG_ON(!cache); /* Logic error */

	/*
	 * pull in the free space cache (if any) so that our pin
	 * removes the free space from the cache. We have load_only set
	 * to one because the slow code to read in the free extents does check
	 * the pinned extents.
	 */
	cache_block_group(cache, 1);

	pin_down_extent(root, cache, bytenr, num_bytes, 0);

	/* remove us from the free space cache (if we're there at all) */
	btrfs_remove_free_space(cache, bytenr, num_bytes);
	btrfs_put_block_group(cache);
	return 0;
}

/**
 * btrfs_update_reserved_bytes - update the block_group and space info counters
 * @cache: The cache we are manipulating
 * @num_bytes: The number of bytes in question
 * @reserve: One of the reservation enums
 *
 * This is called by the allocator when it reserves space, or by somebody who is
 * freeing space that was never actually used on disk. For example if you
 * reserve some space for a new leaf in transaction A and before transaction A
 * commits you free that leaf, you call this with reserve set to 0 in order to
 * clear the reservation.
 *
 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
 * ENOSPC accounting. For data we handle the reservation through clearing the
 * delalloc bits in the io_tree. We have to do this since we could end up
 * allocating less disk space for the amount of data we have reserved in the
 * case of compression.
 *
 * If this is a reservation and the block group has become read only we cannot
 * make the reservation and return -EAGAIN, otherwise this function always
 * succeeds.
 */
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (reserve != RESERVE_FREE) {
		if (cache->ro) {
			ret = -EAGAIN;
		} else {
			cache->reserved += num_bytes;
			space_info->bytes_reserved += num_bytes;
			if (reserve == RESERVE_ALLOC) {
				trace_btrfs_space_reservation(cache->fs_info,
						"space_info", space_info->flags,
						num_bytes, 0);
				space_info->bytes_may_use -= num_bytes;
			}
		}
	} else {
		if (cache->ro)
			space_info->bytes_readonly += num_bytes;
		cache->reserved -= num_bytes;
		space_info->bytes_reserved -= num_bytes;
		space_info->reservation_progress++;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}

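/*
 * Accounting example (illustrative): RESERVE_ALLOC moves num_bytes
 * from the space_info's bytes_may_use into bytes_reserved, while
 * RESERVE_FREE simply drops them from bytes_reserved again, as the
 * never-written-leaf path in btrfs_free_tree_block() below does.
 */
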
void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;

	down_write(&fs_info->extent_commit_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->extent_commit_sem);

	update_global_block_rsv(fs_info);
}

static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 len;
	bool readonly;

	while (start <= end) {
		readonly = false;
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache); /* Logic error */
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			btrfs_add_free_space(cache, start, len);
		}

		start += len;
		space_info = cache->space_info;

		spin_lock(&space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		space_info->bytes_pinned -= len;
		if (cache->ro) {
			space_info->bytes_readonly += len;
			readonly = true;
		}
		spin_unlock(&cache->lock);
		if (!readonly && global_rsv->space_info == space_info) {
			spin_lock(&global_rsv->lock);
			if (!global_rsv->full) {
				len = min(len, global_rsv->size -
					  global_rsv->reserved);
				global_rsv->reserved += len;
				space_info->bytes_may_use += len;
				if (global_rsv->reserved >= global_rsv->size)
					global_rsv->full = 1;
			}
			spin_unlock(&global_rsv->lock);
		}
		spin_unlock(&space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}

int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;

	if (trans->aborted)
		return 0;

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			break;

		if (btrfs_test_opt(root, DISCARD))
			ret = btrfs_discard_extent(root, start,
						   end + 1 - start, NULL);

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		unpin_extent_range(root, start, end);
		cond_resched();
	}

	return 0;
}

static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	ret = lookup_extent_backref(trans, extent_root, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
		if (!found_extent) {
			BUG_ON(iref);
			ret = remove_extent_backref(trans, extent_root, path,
						    NULL, refs_to_drop,
						    is_data);
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
			btrfs_release_path(path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret) {
				printk(KERN_ERR "umm, got %d back from search"
				       ", was looking for %llu\n", ret,
				       (unsigned long long)bytenr);
				if (ret > 0)
					btrfs_print_leaf(extent_root,
							 path->nodes[0]);
			}
			if (ret < 0) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
			extent_slot = path->slots[0];
		}
	} else if (ret == -ENOENT) {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
		       "parent %llu root %llu owner %llu offset %llu\n",
		       (unsigned long long)bytenr,
		       (unsigned long long)parent,
		       (unsigned long long)root_objectid,
		       (unsigned long long)owner_objectid,
		       (unsigned long long)owner_offset);
	} else {
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, extent_root, path,
					     owner_objectid, 0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}

		btrfs_release_path(path);
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
			printk(KERN_ERR "umm, got %d back from search"
			       ", was looking for %llu\n", ret,
			       (unsigned long long)bytenr);
			btrfs_print_leaf(extent_root, path->nodes[0]);
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}

		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs < refs_to_drop);
	refs -= refs_to_drop;

	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
						    iref, refs_to_drop,
						    is_data);
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
		}
	} else {
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(root, path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		if (ret) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}
		btrfs_release_path(path);

		if (is_data) {
			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
		}

		ret = update_block_group(root, bytenr, num_bytes, 0);
		if (ret) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * when we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well. This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	/* there are still entries for this ref, we can't drop it */
	if (ref->bytenr == bytenr)
		goto out;

	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
		btrfs_free_delayed_extent_op(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock. If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries. Go
	 * ahead and process it.
	 */
	head->node.in_tree = 0;
	rb_erase(&head->node.rb_node, &delayed_refs->root);

	delayed_refs->num_entries--;

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (list_empty(&head->cluster))
		delayed_refs->num_heads_ready--;

	list_del_init(&head->cluster);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref(&head->node);
	return ret;
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}

void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	struct btrfs_block_group_cache *cache = NULL;
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
					buf->start, buf->len,
					parent, root->root_key.objectid,
					btrfs_header_level(buf),
					BTRFS_DROP_DELAYED_REF, NULL, 0);
		BUG_ON(ret); /* -ENOMEM */
	}

	if (!last_ref)
		return;

	cache = btrfs_lookup_block_group(root->fs_info, buf->start);

	if (btrfs_header_generation(buf) == trans->transid) {
		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, root, buf->start);
			if (!ret)
				goto out;
		}

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(root, cache, buf->start, buf->len, 1);
			goto out;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
	}
out:
	/*
	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
	 * anymore.
	 */
	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
	btrfs_put_block_group(cache);
}

/* Can return -ENOMEM */
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
		      u64 owner, u64 offset, int for_cow)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(root, bytenr, num_bytes, 1);
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_DROP_DELAYED_REF, NULL, for_cow);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
						num_bytes,
						parent, root_objectid, owner,
						offset, BTRFS_DROP_DELAYED_REF,
						NULL, for_cow);
	}
	return ret;
}

static u64 stripe_align(struct btrfs_root *root,
			struct btrfs_block_group_cache *cache,
			u64 val, u64 num_bytes)
{
	u64 ret = ALIGN(val, root->stripesize);

	return ret;
}

/*
 * when we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once. So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes. Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
 */
static noinline int
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	put_caching_control(caching_ctl);
	return 0;
}

static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache));

	put_caching_control(caching_ctl);
	return 0;
}

int __get_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

static int get_block_group_index(struct btrfs_block_group_cache *cache)
{
	return __get_raid_index(cache->flags);
}

enum btrfs_loop_type {
	LOOP_CACHING_NOWAIT = 0,
	LOOP_CACHING_WAIT = 1,
	LOOP_ALLOC_CHUNK = 2,
	LOOP_NO_EMPTY_SIZE = 3,
};

/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static noinline int find_free_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 hint_byte, struct btrfs_key *ins,
				     u64 data)
{
	int ret = 0;
	struct btrfs_root *root = orig_root->fs_info->extent_root;
	struct btrfs_free_cluster *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group = NULL;
	struct btrfs_block_group_cache *used_block_group;
	u64 search_start = 0;
	int empty_cluster = 2 * 1024 * 1024;
	struct btrfs_space_info *space_info;
	int loop = 0;
	int index = __get_raid_index(data);
	int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
	bool found_uncached_bg = false;
	bool failed_cluster_refill = false;
	bool failed_alloc = false;
	bool use_cluster = true;
	bool have_caching_bg = false;

	WARN_ON(num_bytes < root->sectorsize);
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
	ins->objectid = 0;
	ins->offset = 0;

	trace_find_free_extent(orig_root, num_bytes, empty_size, data);

	space_info = __find_space_info(root->fs_info, data);
	if (!space_info) {
		printk(KERN_ERR "No space info for %llu\n", data);
		return -ENOSPC;
	}

	/*
	 * If the space info is for both data and metadata it means we have a
	 * small filesystem and we can't use the clustering stuff.
	 */
	if (btrfs_mixed_space_info(space_info))
		use_cluster = false;

	if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
		last_ptr = &root->fs_info->meta_alloc_cluster;
		if (!btrfs_test_opt(root, SSD))
			empty_cluster = 64 * 1024;
	}

	if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
	    btrfs_test_opt(root, SSD)) {
		last_ptr = &root->fs_info->data_alloc_cluster;
	}

	if (last_ptr) {
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		spin_unlock(&last_ptr->lock);
	}

	search_start = max(search_start, first_logical_byte(root, 0));
	search_start = max(search_start, hint_byte);

	if (!last_ptr)
		empty_cluster = 0;

	if (search_start == hint_byte) {
		block_group = btrfs_lookup_block_group(root->fs_info,
						       search_start);
		used_block_group = block_group;
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if it's not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, data) &&
		    block_group->cached != BTRFS_CACHE_NO) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				index = get_block_group_index(block_group);
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}
search:
	have_caching_bg = false;
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group, &space_info->block_groups[index],
			    list) {
		u64 offset;
		int cached;

		used_block_group = block_group;
		btrfs_get_block_group(block_group);
		search_start = block_group->key.objectid;

		/*
		 * this can happen if we end up cycling through all the
		 * raid types, but we want to make sure we only allocate
		 * for the proper type.
		 */
		if (!block_group_bits(block_group, data)) {
			u64 extra = BTRFS_BLOCK_GROUP_DUP |
				BTRFS_BLOCK_GROUP_RAID1 |
				BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6 |
				BTRFS_BLOCK_GROUP_RAID10;

			/*
			 * if they asked for extra copies and this block group
			 * doesn't provide them, bail. This does allow us to
			 * fill raid0 from raid1.
			 */
			if ((data & extra) && !(block_group->flags & extra))
				goto loop;
		}

have_block_group:
		cached = block_group_cache_done(block_group);
		if (unlikely(!cached)) {
			found_uncached_bg = true;
			ret = cache_block_group(block_group, 0);
			BUG_ON(ret < 0);
			ret = 0;
		}

		if (unlikely(block_group->ro))
			goto loop;

		/*
		 * Ok we want to try and use the cluster allocator, so
		 * let's look there
		 */
		if (last_ptr) {
			unsigned long aligned_cluster;
			/*
			 * the refill lock keeps out other
			 * people trying to start a new cluster
			 */
			spin_lock(&last_ptr->refill_lock);
			used_block_group = last_ptr->block_group;
			if (used_block_group != block_group &&
			    (!used_block_group ||
			     used_block_group->ro ||
			     !block_group_bits(used_block_group, data))) {
				used_block_group = block_group;
				goto refill_cluster;
			}

			if (used_block_group != block_group)
				btrfs_get_block_group(used_block_group);

			offset = btrfs_alloc_from_cluster(used_block_group,
			  last_ptr, num_bytes, used_block_group->key.objectid);
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
				trace_btrfs_reserve_extent_cluster(root,
					block_group, search_start, num_bytes);
				goto checks;
			}

			WARN_ON(last_ptr->block_group != used_block_group);
			if (used_block_group != block_group) {
				btrfs_put_block_group(used_block_group);
				used_block_group = block_group;
			}
refill_cluster:
			BUG_ON(used_block_group != block_group);
			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
			 * set up a new cluster, so let's just skip it
			 * and let the allocator find whatever block
			 * it can find. If we reach this point, we
			 * will have tried the cluster allocator
			 * plenty of times and not have found
			 * anything, so we are likely way too
			 * fragmented for the clustering stuff to find
			 * anything.
			 *
			 * However, if the cluster is taken from the
			 * current block group, release the cluster
			 * first, so that we stand a better chance of
			 * succeeding in the unclustered
			 * allocation.  */
			if (loop >= LOOP_NO_EMPTY_SIZE &&
			    last_ptr->block_group != block_group) {
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}

			/*
			 * this cluster didn't work out, free it and
			 * start over
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);

			if (loop >= LOOP_NO_EMPTY_SIZE) {
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}

			aligned_cluster = max_t(unsigned long,
						empty_cluster + empty_size,
						block_group->full_stripe_len);

			/* allocate a cluster in this block group */
			ret = btrfs_find_space_cluster(trans, root,
					       block_group, last_ptr,
					       search_start, num_bytes,
					       aligned_cluster);
			if (ret == 0) {
				/*
				 * now pull our allocation out of this
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
						  last_ptr, num_bytes,
						  search_start);
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
					trace_btrfs_reserve_extent_cluster(root,
						block_group, search_start,
						num_bytes);
					goto checks;
				}
			} else if (!cached && loop > LOOP_CACHING_NOWAIT
				   && !failed_cluster_refill) {
				spin_unlock(&last_ptr->refill_lock);

				failed_cluster_refill = true;
				wait_block_group_cache_progress(block_group,
				       num_bytes + empty_cluster + empty_size);
				goto have_block_group;
			}

			/*
			 * at this point we either didn't find a cluster
			 * or we weren't able to allocate a block from our
			 * cluster. Free the cluster we've been trying
			 * to use, and go to the next block group
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);
			spin_unlock(&last_ptr->refill_lock);
			goto loop;
		}
unclustered_alloc:
		spin_lock(&block_group->free_space_ctl->tree_lock);
		if (cached &&
		    block_group->free_space_ctl->free_space <
		    num_bytes + empty_cluster + empty_size) {
			spin_unlock(&block_group->free_space_ctl->tree_lock);
			goto loop;
		}
		spin_unlock(&block_group->free_space_ctl->tree_lock);

		offset = btrfs_find_space_for_alloc(block_group, search_start,
						    num_bytes, empty_size);
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle of
		 * caching and we are ok with waiting, then go ahead and wait
		 * for progress to be made, and set failed_alloc to true.
		 *
		 * If failed_alloc is true then we've already waited on this
		 * block group once and should move on to the next block group.
		 */
		if (!offset && !failed_alloc && !cached &&
		    loop > LOOP_CACHING_NOWAIT) {
			wait_block_group_cache_progress(block_group,
						num_bytes + empty_size);
			failed_alloc = true;
			goto have_block_group;
		} else if (!offset) {
			if (!cached)
				have_caching_bg = true;
			goto loop;
		}
checks:
		search_start = stripe_align(root, used_block_group,
					    offset, num_bytes);

		/* move on to the next group */
		if (search_start + num_bytes >
		    used_block_group->key.objectid + used_block_group->key.offset) {
			btrfs_add_free_space(used_block_group, offset, num_bytes);
			goto loop;
		}

		if (offset < search_start)
			btrfs_add_free_space(used_block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);

		ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
						  alloc_type);
		if (ret == -EAGAIN) {
			btrfs_add_free_space(used_block_group, offset, num_bytes);
			goto loop;
		}

		/* we are all good, let's return */
		ins->objectid = search_start;
		ins->offset = num_bytes;

		trace_btrfs_reserve_extent(orig_root, block_group,
					   search_start, num_bytes);
		if (used_block_group != block_group)
			btrfs_put_block_group(used_block_group);
		btrfs_put_block_group(block_group);
		break;
loop:
		failed_cluster_refill = false;
		failed_alloc = false;
		BUG_ON(index != get_block_group_index(block_group));
		if (used_block_group != block_group)
			btrfs_put_block_group(used_block_group);
		btrfs_put_block_group(block_group);
	}
  5348. up_read(&space_info->groups_sem);
  5349. if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
  5350. goto search;
  5351. if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
  5352. goto search;
  5353. /*
  5354. * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
  5355. * caching kthreads as we move along
  5356. * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
  5357. * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
  5358. * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
  5359. * again
  5360. */
  5361. if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
  5362. index = 0;
  5363. loop++;
  5364. if (loop == LOOP_ALLOC_CHUNK) {
  5365. ret = do_chunk_alloc(trans, root, data,
  5366. CHUNK_ALLOC_FORCE);
  5367. /*
  5368. * Do not bail out on ENOSPC since we
  5369. * can do more things.
  5370. */
  5371. if (ret < 0 && ret != -ENOSPC) {
  5372. btrfs_abort_transaction(trans,
  5373. root, ret);
  5374. goto out;
  5375. }
  5376. }
  5377. if (loop == LOOP_NO_EMPTY_SIZE) {
  5378. empty_size = 0;
  5379. empty_cluster = 0;
  5380. }
  5381. goto search;
  5382. } else if (!ins->objectid) {
  5383. ret = -ENOSPC;
  5384. } else if (ins->objectid) {
  5385. ret = 0;
  5386. }
  5387. out:
  5388. return ret;
  5389. }
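/*
 * helper to dump a space_info's counters and, when dump_block_groups is
 * set, every block group in it; used below for ENOSPC debugging
 */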
  5390. static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
  5391. int dump_block_groups)
  5392. {
  5393. struct btrfs_block_group_cache *cache;
  5394. int index = 0;
  5395. spin_lock(&info->lock);
  5396. printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
  5397. (unsigned long long)info->flags,
  5398. (unsigned long long)(info->total_bytes - info->bytes_used -
  5399. info->bytes_pinned - info->bytes_reserved -
  5400. info->bytes_readonly),
  5401. (info->full) ? "" : "not ");
  5402. printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
  5403. "reserved=%llu, may_use=%llu, readonly=%llu\n",
  5404. (unsigned long long)info->total_bytes,
  5405. (unsigned long long)info->bytes_used,
  5406. (unsigned long long)info->bytes_pinned,
  5407. (unsigned long long)info->bytes_reserved,
  5408. (unsigned long long)info->bytes_may_use,
  5409. (unsigned long long)info->bytes_readonly);
  5410. spin_unlock(&info->lock);
  5411. if (!dump_block_groups)
  5412. return;
  5413. down_read(&info->groups_sem);
  5414. again:
  5415. list_for_each_entry(cache, &info->block_groups[index], list) {
  5416. spin_lock(&cache->lock);
  5417. printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
  5418. (unsigned long long)cache->key.objectid,
  5419. (unsigned long long)cache->key.offset,
  5420. (unsigned long long)btrfs_block_group_used(&cache->item),
  5421. (unsigned long long)cache->pinned,
  5422. (unsigned long long)cache->reserved,
  5423. cache->ro ? "[readonly]" : "");
  5424. btrfs_dump_free_space(cache, bytes);
  5425. spin_unlock(&cache->lock);
  5426. }
  5427. if (++index < BTRFS_NR_RAID_TYPES)
  5428. goto again;
  5429. up_read(&info->groups_sem);
  5430. }
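/*
 * wrapper around find_free_extent that retries on ENOSPC, halving the
 * requested size (rounded down to a sector boundary) until it reaches
 * min_alloc_size; dumps the space info when ENOSPC_DEBUG is set
 */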
  5431. int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
  5432. struct btrfs_root *root,
  5433. u64 num_bytes, u64 min_alloc_size,
  5434. u64 empty_size, u64 hint_byte,
  5435. struct btrfs_key *ins, u64 data)
  5436. {
  5437. bool final_tried = false;
  5438. int ret;
  5439. data = btrfs_get_alloc_profile(root, data);
  5440. again:
  5441. WARN_ON(num_bytes < root->sectorsize);
  5442. ret = find_free_extent(trans, root, num_bytes, empty_size,
  5443. hint_byte, ins, data);
  5444. if (ret == -ENOSPC) {
  5445. if (!final_tried) {
  5446. num_bytes = num_bytes >> 1;
  5447. num_bytes = round_down(num_bytes, root->sectorsize);
  5448. num_bytes = max(num_bytes, min_alloc_size);
  5449. if (num_bytes == min_alloc_size)
  5450. final_tried = true;
  5451. goto again;
  5452. } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
  5453. struct btrfs_space_info *sinfo;
  5454. sinfo = __find_space_info(root->fs_info, data);
  5455. printk(KERN_ERR "btrfs allocation failed flags %llu, "
  5456. "wanted %llu\n", (unsigned long long)data,
  5457. (unsigned long long)num_bytes);
  5458. if (sinfo)
  5459. dump_space_info(sinfo, num_bytes, 1);
  5460. }
  5461. }
  5462. trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
  5463. return ret;
  5464. }
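/*
 * helper to release a reserved extent: the range is discarded when the
 * DISCARD mount option is set, then either pinned down or returned to
 * the block group's free space depending on @pin
 */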
  5465. static int __btrfs_free_reserved_extent(struct btrfs_root *root,
  5466. u64 start, u64 len, int pin)
  5467. {
  5468. struct btrfs_block_group_cache *cache;
  5469. int ret = 0;
  5470. cache = btrfs_lookup_block_group(root->fs_info, start);
  5471. if (!cache) {
  5472. printk(KERN_ERR "Unable to find block group for %llu\n",
  5473. (unsigned long long)start);
  5474. return -ENOSPC;
  5475. }
  5476. if (btrfs_test_opt(root, DISCARD))
  5477. ret = btrfs_discard_extent(root, start, len, NULL);
  5478. if (pin)
  5479. pin_down_extent(root, cache, start, len, 1);
  5480. else {
  5481. btrfs_add_free_space(cache, start, len);
  5482. btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
  5483. }
  5484. btrfs_put_block_group(cache);
  5485. trace_btrfs_reserved_extent_free(root, start, len);
  5486. return ret;
  5487. }
  5488. int btrfs_free_reserved_extent(struct btrfs_root *root,
  5489. u64 start, u64 len)
  5490. {
  5491. return __btrfs_free_reserved_extent(root, start, len, 0);
  5492. }
  5493. int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
  5494. u64 start, u64 len)
  5495. {
  5496. return __btrfs_free_reserved_extent(root, start, len, 1);
  5497. }
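/*
 * insert the extent item and inline data backref (a shared ref when
 * @parent is set, a keyed ref otherwise) for a newly allocated data
 * extent, then update the block group accounting
 */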
  5498. static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
  5499. struct btrfs_root *root,
  5500. u64 parent, u64 root_objectid,
  5501. u64 flags, u64 owner, u64 offset,
  5502. struct btrfs_key *ins, int ref_mod)
  5503. {
  5504. int ret;
  5505. struct btrfs_fs_info *fs_info = root->fs_info;
  5506. struct btrfs_extent_item *extent_item;
  5507. struct btrfs_extent_inline_ref *iref;
  5508. struct btrfs_path *path;
  5509. struct extent_buffer *leaf;
  5510. int type;
  5511. u32 size;
  5512. if (parent > 0)
  5513. type = BTRFS_SHARED_DATA_REF_KEY;
  5514. else
  5515. type = BTRFS_EXTENT_DATA_REF_KEY;
  5516. size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
  5517. path = btrfs_alloc_path();
  5518. if (!path)
  5519. return -ENOMEM;
  5520. path->leave_spinning = 1;
  5521. ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
  5522. ins, size);
  5523. if (ret) {
  5524. btrfs_free_path(path);
  5525. return ret;
  5526. }
  5527. leaf = path->nodes[0];
  5528. extent_item = btrfs_item_ptr(leaf, path->slots[0],
  5529. struct btrfs_extent_item);
  5530. btrfs_set_extent_refs(leaf, extent_item, ref_mod);
  5531. btrfs_set_extent_generation(leaf, extent_item, trans->transid);
  5532. btrfs_set_extent_flags(leaf, extent_item,
  5533. flags | BTRFS_EXTENT_FLAG_DATA);
  5534. iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
  5535. btrfs_set_extent_inline_ref_type(leaf, iref, type);
  5536. if (parent > 0) {
  5537. struct btrfs_shared_data_ref *ref;
  5538. ref = (struct btrfs_shared_data_ref *)(iref + 1);
  5539. btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
  5540. btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
  5541. } else {
  5542. struct btrfs_extent_data_ref *ref;
  5543. ref = (struct btrfs_extent_data_ref *)(&iref->offset);
  5544. btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
  5545. btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
  5546. btrfs_set_extent_data_ref_offset(leaf, ref, offset);
  5547. btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
  5548. }
  5549. btrfs_mark_buffer_dirty(path->nodes[0]);
  5550. btrfs_free_path(path);
  5551. ret = update_block_group(root, ins->objectid, ins->offset, 1);
  5552. if (ret) { /* -ENOENT, logic error */
  5553. printk(KERN_ERR "btrfs update block group failed for %llu "
  5554. "%llu\n", (unsigned long long)ins->objectid,
  5555. (unsigned long long)ins->offset);
  5556. BUG();
  5557. }
  5558. return ret;
  5559. }
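/*
 * same as above but for a newly allocated tree block: the extent item
 * carries a tree_block_info plus a single inline tree backref
 */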
  5560. static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
  5561. struct btrfs_root *root,
  5562. u64 parent, u64 root_objectid,
  5563. u64 flags, struct btrfs_disk_key *key,
  5564. int level, struct btrfs_key *ins)
  5565. {
  5566. int ret;
  5567. struct btrfs_fs_info *fs_info = root->fs_info;
  5568. struct btrfs_extent_item *extent_item;
  5569. struct btrfs_tree_block_info *block_info;
  5570. struct btrfs_extent_inline_ref *iref;
  5571. struct btrfs_path *path;
  5572. struct extent_buffer *leaf;
  5573. u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
  5574. path = btrfs_alloc_path();
  5575. if (!path)
  5576. return -ENOMEM;
  5577. path->leave_spinning = 1;
  5578. ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
  5579. ins, size);
  5580. if (ret) {
  5581. btrfs_free_path(path);
  5582. return ret;
  5583. }
  5584. leaf = path->nodes[0];
  5585. extent_item = btrfs_item_ptr(leaf, path->slots[0],
  5586. struct btrfs_extent_item);
  5587. btrfs_set_extent_refs(leaf, extent_item, 1);
  5588. btrfs_set_extent_generation(leaf, extent_item, trans->transid);
  5589. btrfs_set_extent_flags(leaf, extent_item,
  5590. flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
  5591. block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
  5592. btrfs_set_tree_block_key(leaf, block_info, key);
  5593. btrfs_set_tree_block_level(leaf, block_info, level);
  5594. iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
  5595. if (parent > 0) {
  5596. BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
  5597. btrfs_set_extent_inline_ref_type(leaf, iref,
  5598. BTRFS_SHARED_BLOCK_REF_KEY);
  5599. btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
  5600. } else {
  5601. btrfs_set_extent_inline_ref_type(leaf, iref,
  5602. BTRFS_TREE_BLOCK_REF_KEY);
  5603. btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
  5604. }
  5605. btrfs_mark_buffer_dirty(leaf);
  5606. btrfs_free_path(path);
  5607. ret = update_block_group(root, ins->objectid, ins->offset, 1);
  5608. if (ret) { /* -ENOENT, logic error */
  5609. printk(KERN_ERR "btrfs update block group failed for %llu "
  5610. "%llu\n", (unsigned long long)ins->objectid,
  5611. (unsigned long long)ins->offset);
  5612. BUG();
  5613. }
  5614. return ret;
  5615. }
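/*
 * queue a delayed ref that will insert the extent item for a newly
 * allocated, already reserved file extent
 */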
  5616. int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
  5617. struct btrfs_root *root,
  5618. u64 root_objectid, u64 owner,
  5619. u64 offset, struct btrfs_key *ins)
  5620. {
  5621. int ret;
  5622. BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
  5623. ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
  5624. ins->offset, 0,
  5625. root_objectid, owner, offset,
  5626. BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
  5627. return ret;
  5628. }
  5629. /*
  5630. * this is used by the tree logging recovery code. It records that
  5631. * an extent has been allocated and makes sure to clear the free
  5632. * space cache bits as well
  5633. */
  5634. int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
  5635. struct btrfs_root *root,
  5636. u64 root_objectid, u64 owner, u64 offset,
  5637. struct btrfs_key *ins)
  5638. {
  5639. int ret;
  5640. struct btrfs_block_group_cache *block_group;
  5641. struct btrfs_caching_control *caching_ctl;
  5642. u64 start = ins->objectid;
  5643. u64 num_bytes = ins->offset;
  5644. block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
  5645. cache_block_group(block_group, 0);
  5646. caching_ctl = get_caching_control(block_group);
  5647. if (!caching_ctl) {
  5648. BUG_ON(!block_group_cache_done(block_group));
  5649. ret = btrfs_remove_free_space(block_group, start, num_bytes);
  5650. BUG_ON(ret); /* -ENOMEM */
  5651. } else {
  5652. mutex_lock(&caching_ctl->mutex);
  5653. if (start >= caching_ctl->progress) {
  5654. ret = add_excluded_extent(root, start, num_bytes);
  5655. BUG_ON(ret); /* -ENOMEM */
  5656. } else if (start + num_bytes <= caching_ctl->progress) {
  5657. ret = btrfs_remove_free_space(block_group,
  5658. start, num_bytes);
  5659. BUG_ON(ret); /* -ENOMEM */
  5660. } else {
  5661. num_bytes = caching_ctl->progress - start;
  5662. ret = btrfs_remove_free_space(block_group,
  5663. start, num_bytes);
  5664. BUG_ON(ret); /* -ENOMEM */
  5665. start = caching_ctl->progress;
  5666. num_bytes = ins->objectid + ins->offset -
  5667. caching_ctl->progress;
  5668. ret = add_excluded_extent(root, start, num_bytes);
  5669. BUG_ON(ret); /* -ENOMEM */
  5670. }
  5671. mutex_unlock(&caching_ctl->mutex);
  5672. put_caching_control(caching_ctl);
  5673. }
  5674. ret = btrfs_update_reserved_bytes(block_group, ins->offset,
  5675. RESERVE_ALLOC_NO_ACCOUNT);
  5676. BUG_ON(ret); /* logic error */
  5677. btrfs_put_block_group(block_group);
  5678. ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
  5679. 0, owner, offset, ins, 1);
  5680. return ret;
  5681. }
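/*
 * set up a newly allocated tree block: set its generation, lock it,
 * mark it uptodate and record it as dirty in the log or transaction
 * dirty pages tree, as appropriate
 */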
  5682. struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
  5683. struct btrfs_root *root,
  5684. u64 bytenr, u32 blocksize,
  5685. int level)
  5686. {
  5687. struct extent_buffer *buf;
  5688. buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
  5689. if (!buf)
  5690. return ERR_PTR(-ENOMEM);
  5691. btrfs_set_header_generation(buf, trans->transid);
  5692. btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
  5693. btrfs_tree_lock(buf);
  5694. clean_tree_block(trans, root, buf);
  5695. clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
  5696. btrfs_set_lock_blocking(buf);
  5697. btrfs_set_buffer_uptodate(buf);
  5698. if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
  5699. /*
5700. * we allow two log transactions at a time, so use different
5701. * EXTENT bits to differentiate dirty pages.
  5702. */
  5703. if (root->log_transid % 2 == 0)
  5704. set_extent_dirty(&root->dirty_log_pages, buf->start,
  5705. buf->start + buf->len - 1, GFP_NOFS);
  5706. else
  5707. set_extent_new(&root->dirty_log_pages, buf->start,
  5708. buf->start + buf->len - 1, GFP_NOFS);
  5709. } else {
  5710. set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
  5711. buf->start + buf->len - 1, GFP_NOFS);
  5712. }
  5713. trans->blocks_used++;
  5714. /* this returns a buffer locked for blocking */
  5715. return buf;
  5716. }
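/*
 * pick the block rsv for this transaction and make sure it has room
 * for one more block, falling back to the global reserve when needed
 */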
  5717. static struct btrfs_block_rsv *
  5718. use_block_rsv(struct btrfs_trans_handle *trans,
  5719. struct btrfs_root *root, u32 blocksize)
  5720. {
  5721. struct btrfs_block_rsv *block_rsv;
  5722. struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
  5723. int ret;
  5724. block_rsv = get_block_rsv(trans, root);
  5725. if (block_rsv->size == 0) {
  5726. ret = reserve_metadata_bytes(root, block_rsv, blocksize,
  5727. BTRFS_RESERVE_NO_FLUSH);
  5728. /*
5729. * If we couldn't reserve metadata bytes, try to use some from
  5730. * the global reserve.
  5731. */
  5732. if (ret && block_rsv != global_rsv) {
  5733. ret = block_rsv_use_bytes(global_rsv, blocksize);
  5734. if (!ret)
  5735. return global_rsv;
  5736. return ERR_PTR(ret);
  5737. } else if (ret) {
  5738. return ERR_PTR(ret);
  5739. }
  5740. return block_rsv;
  5741. }
  5742. ret = block_rsv_use_bytes(block_rsv, blocksize);
  5743. if (!ret)
  5744. return block_rsv;
  5745. if (ret && !block_rsv->failfast) {
  5746. if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
  5747. static DEFINE_RATELIMIT_STATE(_rs,
  5748. DEFAULT_RATELIMIT_INTERVAL * 10,
  5749. /*DEFAULT_RATELIMIT_BURST*/ 1);
  5750. if (__ratelimit(&_rs))
  5751. WARN(1, KERN_DEBUG
  5752. "btrfs: block rsv returned %d\n", ret);
  5753. }
  5754. ret = reserve_metadata_bytes(root, block_rsv, blocksize,
  5755. BTRFS_RESERVE_NO_FLUSH);
  5756. if (!ret) {
  5757. return block_rsv;
  5758. } else if (ret && block_rsv != global_rsv) {
  5759. ret = block_rsv_use_bytes(global_rsv, blocksize);
  5760. if (!ret)
  5761. return global_rsv;
  5762. }
  5763. }
  5764. return ERR_PTR(-ENOSPC);
  5765. }
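/* give back a reservation taken by use_block_rsv */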
  5766. static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
  5767. struct btrfs_block_rsv *block_rsv, u32 blocksize)
  5768. {
  5769. block_rsv_add_bytes(block_rsv, blocksize, 0);
  5770. block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
  5771. }
  5772. /*
5773. * finds a free extent and does all the dirty work required for allocation.
5774. * returns the key for the extent through ins, and a tree buffer for
5775. * the first block of the extent through buf.
5776. *
5777. * returns the tree buffer, or an ERR_PTR on failure.
  5778. */
  5779. struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
  5780. struct btrfs_root *root, u32 blocksize,
  5781. u64 parent, u64 root_objectid,
  5782. struct btrfs_disk_key *key, int level,
  5783. u64 hint, u64 empty_size)
  5784. {
  5785. struct btrfs_key ins;
  5786. struct btrfs_block_rsv *block_rsv;
  5787. struct extent_buffer *buf;
  5788. u64 flags = 0;
  5789. int ret;
  5790. block_rsv = use_block_rsv(trans, root, blocksize);
  5791. if (IS_ERR(block_rsv))
  5792. return ERR_CAST(block_rsv);
  5793. ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
  5794. empty_size, hint, &ins, 0);
  5795. if (ret) {
  5796. unuse_block_rsv(root->fs_info, block_rsv, blocksize);
  5797. return ERR_PTR(ret);
  5798. }
  5799. buf = btrfs_init_new_buffer(trans, root, ins.objectid,
  5800. blocksize, level);
  5801. BUG_ON(IS_ERR(buf)); /* -ENOMEM */
  5802. if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
  5803. if (parent == 0)
  5804. parent = ins.objectid;
  5805. flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
  5806. } else
  5807. BUG_ON(parent > 0);
  5808. if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
  5809. struct btrfs_delayed_extent_op *extent_op;
  5810. extent_op = btrfs_alloc_delayed_extent_op();
  5811. BUG_ON(!extent_op); /* -ENOMEM */
  5812. if (key)
  5813. memcpy(&extent_op->key, key, sizeof(extent_op->key));
  5814. else
  5815. memset(&extent_op->key, 0, sizeof(extent_op->key));
  5816. extent_op->flags_to_set = flags;
  5817. extent_op->update_key = 1;
  5818. extent_op->update_flags = 1;
  5819. extent_op->is_data = 0;
  5820. ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
  5821. ins.objectid,
  5822. ins.offset, parent, root_objectid,
  5823. level, BTRFS_ADD_DELAYED_EXTENT,
  5824. extent_op, 0);
  5825. BUG_ON(ret); /* -ENOMEM */
  5826. }
  5827. return buf;
  5828. }
  5829. struct walk_control {
  5830. u64 refs[BTRFS_MAX_LEVEL];
  5831. u64 flags[BTRFS_MAX_LEVEL];
  5832. struct btrfs_key update_progress;
  5833. int stage;
  5834. int level;
  5835. int shared_level;
  5836. int update_ref;
  5837. int keep_locks;
  5838. int reada_slot;
  5839. int reada_count;
  5840. int for_reloc;
  5841. };
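/*
 * stages for walking a subvolume tree: DROP_REFERENCE drops our refs on
 * blocks we are done with, UPDATE_BACKREF converts a shared subtree to
 * use full backrefs before the drop continues
 */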
  5842. #define DROP_REFERENCE 1
  5843. #define UPDATE_BACKREF 2
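/*
 * readahead helper for the tree walk below: kicks off reads for child
 * blocks of the current node, growing or shrinking wc->reada_count
 * based on how far the walk got since the last readahead
 */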
  5844. static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
  5845. struct btrfs_root *root,
  5846. struct walk_control *wc,
  5847. struct btrfs_path *path)
  5848. {
  5849. u64 bytenr;
  5850. u64 generation;
  5851. u64 refs;
  5852. u64 flags;
  5853. u32 nritems;
  5854. u32 blocksize;
  5855. struct btrfs_key key;
  5856. struct extent_buffer *eb;
  5857. int ret;
  5858. int slot;
  5859. int nread = 0;
  5860. if (path->slots[wc->level] < wc->reada_slot) {
  5861. wc->reada_count = wc->reada_count * 2 / 3;
  5862. wc->reada_count = max(wc->reada_count, 2);
  5863. } else {
  5864. wc->reada_count = wc->reada_count * 3 / 2;
  5865. wc->reada_count = min_t(int, wc->reada_count,
  5866. BTRFS_NODEPTRS_PER_BLOCK(root));
  5867. }
  5868. eb = path->nodes[wc->level];
  5869. nritems = btrfs_header_nritems(eb);
  5870. blocksize = btrfs_level_size(root, wc->level - 1);
  5871. for (slot = path->slots[wc->level]; slot < nritems; slot++) {
  5872. if (nread >= wc->reada_count)
  5873. break;
  5874. cond_resched();
  5875. bytenr = btrfs_node_blockptr(eb, slot);
  5876. generation = btrfs_node_ptr_generation(eb, slot);
  5877. if (slot == path->slots[wc->level])
  5878. goto reada;
  5879. if (wc->stage == UPDATE_BACKREF &&
  5880. generation <= root->root_key.offset)
  5881. continue;
  5882. /* We don't lock the tree block, it's OK to be racy here */
  5883. ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
  5884. &refs, &flags);
  5885. /* We don't care about errors in readahead. */
  5886. if (ret < 0)
  5887. continue;
  5888. BUG_ON(refs == 0);
  5889. if (wc->stage == DROP_REFERENCE) {
  5890. if (refs == 1)
  5891. goto reada;
  5892. if (wc->level == 1 &&
  5893. (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
  5894. continue;
  5895. if (!wc->update_ref ||
  5896. generation <= root->root_key.offset)
  5897. continue;
  5898. btrfs_node_key_to_cpu(eb, &key, slot);
  5899. ret = btrfs_comp_cpu_keys(&key,
  5900. &wc->update_progress);
  5901. if (ret < 0)
  5902. continue;
  5903. } else {
  5904. if (wc->level == 1 &&
  5905. (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
  5906. continue;
  5907. }
  5908. reada:
  5909. ret = readahead_tree_block(root, bytenr, blocksize,
  5910. generation);
  5911. if (ret)
  5912. break;
  5913. nread++;
  5914. }
  5915. wc->reada_slot = slot;
  5916. }
  5917. /*
5918. * helper to process tree block while walking down the tree.
  5919. *
  5920. * when wc->stage == UPDATE_BACKREF, this function updates
  5921. * back refs for pointers in the block.
  5922. *
  5923. * NOTE: return value 1 means we should stop walking down.
  5924. */
  5925. static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
  5926. struct btrfs_root *root,
  5927. struct btrfs_path *path,
  5928. struct walk_control *wc, int lookup_info)
  5929. {
  5930. int level = wc->level;
  5931. struct extent_buffer *eb = path->nodes[level];
  5932. u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
  5933. int ret;
  5934. if (wc->stage == UPDATE_BACKREF &&
  5935. btrfs_header_owner(eb) != root->root_key.objectid)
  5936. return 1;
  5937. /*
5938. * when the reference count of a tree block is 1, it won't increase
5939. * again. once the full backref flag is set, we never clear it.
  5940. */
  5941. if (lookup_info &&
  5942. ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
  5943. (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
  5944. BUG_ON(!path->locks[level]);
  5945. ret = btrfs_lookup_extent_info(trans, root,
  5946. eb->start, eb->len,
  5947. &wc->refs[level],
  5948. &wc->flags[level]);
  5949. BUG_ON(ret == -ENOMEM);
  5950. if (ret)
  5951. return ret;
  5952. BUG_ON(wc->refs[level] == 0);
  5953. }
  5954. if (wc->stage == DROP_REFERENCE) {
  5955. if (wc->refs[level] > 1)
  5956. return 1;
  5957. if (path->locks[level] && !wc->keep_locks) {
  5958. btrfs_tree_unlock_rw(eb, path->locks[level]);
  5959. path->locks[level] = 0;
  5960. }
  5961. return 0;
  5962. }
  5963. /* wc->stage == UPDATE_BACKREF */
  5964. if (!(wc->flags[level] & flag)) {
  5965. BUG_ON(!path->locks[level]);
  5966. ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
  5967. BUG_ON(ret); /* -ENOMEM */
  5968. ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
  5969. BUG_ON(ret); /* -ENOMEM */
  5970. ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
  5971. eb->len, flag, 0);
  5972. BUG_ON(ret); /* -ENOMEM */
  5973. wc->flags[level] |= flag;
  5974. }
  5975. /*
  5976. * the block is shared by multiple trees, so it's not good to
  5977. * keep the tree lock
  5978. */
  5979. if (path->locks[level] && level > 0) {
  5980. btrfs_tree_unlock_rw(eb, path->locks[level]);
  5981. path->locks[level] = 0;
  5982. }
  5983. return 0;
  5984. }
  5985. /*
5986. * helper to process tree block pointer.
  5987. *
  5988. * when wc->stage == DROP_REFERENCE, this function checks
5989. * the reference count of the block pointed to. if the block
5990. * is shared and we need to update back refs for the subtree
5991. * rooted at the block, this function changes wc->stage to
5992. * UPDATE_BACKREF. if the block is shared and there is no
5993. * need to update back refs, this function drops the reference
  5994. * to the block.
  5995. *
  5996. * NOTE: return value 1 means we should stop walking down.
  5997. */
  5998. static noinline int do_walk_down(struct btrfs_trans_handle *trans,
  5999. struct btrfs_root *root,
  6000. struct btrfs_path *path,
  6001. struct walk_control *wc, int *lookup_info)
  6002. {
  6003. u64 bytenr;
  6004. u64 generation;
  6005. u64 parent;
  6006. u32 blocksize;
  6007. struct btrfs_key key;
  6008. struct extent_buffer *next;
  6009. int level = wc->level;
  6010. int reada = 0;
  6011. int ret = 0;
  6012. generation = btrfs_node_ptr_generation(path->nodes[level],
  6013. path->slots[level]);
  6014. /*
  6015. * if the lower level block was created before the snapshot
  6016. * was created, we know there is no need to update back refs
  6017. * for the subtree
  6018. */
  6019. if (wc->stage == UPDATE_BACKREF &&
  6020. generation <= root->root_key.offset) {
  6021. *lookup_info = 1;
  6022. return 1;
  6023. }
  6024. bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
  6025. blocksize = btrfs_level_size(root, level - 1);
  6026. next = btrfs_find_tree_block(root, bytenr, blocksize);
  6027. if (!next) {
  6028. next = btrfs_find_create_tree_block(root, bytenr, blocksize);
  6029. if (!next)
  6030. return -ENOMEM;
  6031. reada = 1;
  6032. }
  6033. btrfs_tree_lock(next);
  6034. btrfs_set_lock_blocking(next);
  6035. ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
  6036. &wc->refs[level - 1],
  6037. &wc->flags[level - 1]);
  6038. if (ret < 0) {
  6039. btrfs_tree_unlock(next);
  6040. return ret;
  6041. }
  6042. BUG_ON(wc->refs[level - 1] == 0);
  6043. *lookup_info = 0;
  6044. if (wc->stage == DROP_REFERENCE) {
  6045. if (wc->refs[level - 1] > 1) {
  6046. if (level == 1 &&
  6047. (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
  6048. goto skip;
  6049. if (!wc->update_ref ||
  6050. generation <= root->root_key.offset)
  6051. goto skip;
  6052. btrfs_node_key_to_cpu(path->nodes[level], &key,
  6053. path->slots[level]);
  6054. ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
  6055. if (ret < 0)
  6056. goto skip;
  6057. wc->stage = UPDATE_BACKREF;
  6058. wc->shared_level = level - 1;
  6059. }
  6060. } else {
  6061. if (level == 1 &&
  6062. (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
  6063. goto skip;
  6064. }
  6065. if (!btrfs_buffer_uptodate(next, generation, 0)) {
  6066. btrfs_tree_unlock(next);
  6067. free_extent_buffer(next);
  6068. next = NULL;
  6069. *lookup_info = 1;
  6070. }
  6071. if (!next) {
  6072. if (reada && level == 1)
  6073. reada_walk_down(trans, root, wc, path);
  6074. next = read_tree_block(root, bytenr, blocksize, generation);
  6075. if (!next)
  6076. return -EIO;
  6077. btrfs_tree_lock(next);
  6078. btrfs_set_lock_blocking(next);
  6079. }
  6080. level--;
  6081. BUG_ON(level != btrfs_header_level(next));
  6082. path->nodes[level] = next;
  6083. path->slots[level] = 0;
  6084. path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
  6085. wc->level = level;
  6086. if (wc->level == 1)
  6087. wc->reada_slot = 0;
  6088. return 0;
  6089. skip:
  6090. wc->refs[level - 1] = 0;
  6091. wc->flags[level - 1] = 0;
  6092. if (wc->stage == DROP_REFERENCE) {
  6093. if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
  6094. parent = path->nodes[level]->start;
  6095. } else {
  6096. BUG_ON(root->root_key.objectid !=
  6097. btrfs_header_owner(path->nodes[level]));
  6098. parent = 0;
  6099. }
  6100. ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
  6101. root->root_key.objectid, level - 1, 0, 0);
  6102. BUG_ON(ret); /* -ENOMEM */
  6103. }
  6104. btrfs_tree_unlock(next);
  6105. free_extent_buffer(next);
  6106. *lookup_info = 1;
  6107. return 1;
  6108. }
  6109. /*
6110. * helper to process tree block while walking up the tree.
  6111. *
  6112. * when wc->stage == DROP_REFERENCE, this function drops
  6113. * reference count on the block.
  6114. *
  6115. * when wc->stage == UPDATE_BACKREF, this function changes
  6116. * wc->stage back to DROP_REFERENCE if we changed wc->stage
  6117. * to UPDATE_BACKREF previously while processing the block.
  6118. *
  6119. * NOTE: return value 1 means we should stop walking up.
  6120. */
  6121. static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
  6122. struct btrfs_root *root,
  6123. struct btrfs_path *path,
  6124. struct walk_control *wc)
  6125. {
  6126. int ret;
  6127. int level = wc->level;
  6128. struct extent_buffer *eb = path->nodes[level];
  6129. u64 parent = 0;
  6130. if (wc->stage == UPDATE_BACKREF) {
  6131. BUG_ON(wc->shared_level < level);
  6132. if (level < wc->shared_level)
  6133. goto out;
  6134. ret = find_next_key(path, level + 1, &wc->update_progress);
  6135. if (ret > 0)
  6136. wc->update_ref = 0;
  6137. wc->stage = DROP_REFERENCE;
  6138. wc->shared_level = -1;
  6139. path->slots[level] = 0;
  6140. /*
  6141. * check reference count again if the block isn't locked.
  6142. * we should start walking down the tree again if reference
  6143. * count is one.
  6144. */
  6145. if (!path->locks[level]) {
  6146. BUG_ON(level == 0);
  6147. btrfs_tree_lock(eb);
  6148. btrfs_set_lock_blocking(eb);
  6149. path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
  6150. ret = btrfs_lookup_extent_info(trans, root,
  6151. eb->start, eb->len,
  6152. &wc->refs[level],
  6153. &wc->flags[level]);
  6154. if (ret < 0) {
  6155. btrfs_tree_unlock_rw(eb, path->locks[level]);
  6156. path->locks[level] = 0;
  6157. return ret;
  6158. }
  6159. BUG_ON(wc->refs[level] == 0);
  6160. if (wc->refs[level] == 1) {
  6161. btrfs_tree_unlock_rw(eb, path->locks[level]);
  6162. path->locks[level] = 0;
  6163. return 1;
  6164. }
  6165. }
  6166. }
  6167. /* wc->stage == DROP_REFERENCE */
  6168. BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
  6169. if (wc->refs[level] == 1) {
  6170. if (level == 0) {
  6171. if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
  6172. ret = btrfs_dec_ref(trans, root, eb, 1,
  6173. wc->for_reloc);
  6174. else
  6175. ret = btrfs_dec_ref(trans, root, eb, 0,
  6176. wc->for_reloc);
  6177. BUG_ON(ret); /* -ENOMEM */
  6178. }
6179. /* make the block-locked assertion in clean_tree_block happy */
  6180. if (!path->locks[level] &&
  6181. btrfs_header_generation(eb) == trans->transid) {
  6182. btrfs_tree_lock(eb);
  6183. btrfs_set_lock_blocking(eb);
  6184. path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
  6185. }
  6186. clean_tree_block(trans, root, eb);
  6187. }
  6188. if (eb == root->node) {
  6189. if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
  6190. parent = eb->start;
  6191. else
  6192. BUG_ON(root->root_key.objectid !=
  6193. btrfs_header_owner(eb));
  6194. } else {
  6195. if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
  6196. parent = path->nodes[level + 1]->start;
  6197. else
  6198. BUG_ON(root->root_key.objectid !=
  6199. btrfs_header_owner(path->nodes[level + 1]));
  6200. }
  6201. btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
  6202. out:
  6203. wc->refs[level] = 0;
  6204. wc->flags[level] = 0;
  6205. return 0;
  6206. }
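/*
 * walk down from wc->level, processing each block with walk_down_proc
 * and descending via do_walk_down until we reach a leaf or a block we
 * should not descend into
 */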
  6207. static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
  6208. struct btrfs_root *root,
  6209. struct btrfs_path *path,
  6210. struct walk_control *wc)
  6211. {
  6212. int level = wc->level;
  6213. int lookup_info = 1;
  6214. int ret;
  6215. while (level >= 0) {
  6216. ret = walk_down_proc(trans, root, path, wc, lookup_info);
  6217. if (ret > 0)
  6218. break;
  6219. if (level == 0)
  6220. break;
  6221. if (path->slots[level] >=
  6222. btrfs_header_nritems(path->nodes[level]))
  6223. break;
  6224. ret = do_walk_down(trans, root, path, wc, &lookup_info);
  6225. if (ret > 0) {
  6226. path->slots[level]++;
  6227. continue;
  6228. } else if (ret < 0)
  6229. return ret;
  6230. level = wc->level;
  6231. }
  6232. return 0;
  6233. }
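/*
 * walk back up the tree, dropping each finished block via walk_up_proc;
 * returns 0 once there is a sibling slot to descend into again, 1 when
 * everything up to max_level has been processed
 */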
  6234. static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
  6235. struct btrfs_root *root,
  6236. struct btrfs_path *path,
  6237. struct walk_control *wc, int max_level)
  6238. {
  6239. int level = wc->level;
  6240. int ret;
  6241. path->slots[level] = btrfs_header_nritems(path->nodes[level]);
  6242. while (level < max_level && path->nodes[level]) {
  6243. wc->level = level;
  6244. if (path->slots[level] + 1 <
  6245. btrfs_header_nritems(path->nodes[level])) {
  6246. path->slots[level]++;
  6247. return 0;
  6248. } else {
  6249. ret = walk_up_proc(trans, root, path, wc);
  6250. if (ret > 0)
  6251. return 0;
  6252. if (path->locks[level]) {
  6253. btrfs_tree_unlock_rw(path->nodes[level],
  6254. path->locks[level]);
  6255. path->locks[level] = 0;
  6256. }
  6257. free_extent_buffer(path->nodes[level]);
  6258. path->nodes[level] = NULL;
  6259. level++;
  6260. }
  6261. }
  6262. return 1;
  6263. }
  6264. /*
  6265. * drop a subvolume tree.
  6266. *
6267. * this function traverses the tree freeing any blocks that are only
6268. * referenced by the tree.
6269. *
6270. * when a shared tree block is found, this function decreases its
6271. * reference count by one. if update_ref is true, this function
6272. * also makes sure backrefs for the shared block and all lower level
  6273. * blocks are properly updated.
  6274. */
  6275. int btrfs_drop_snapshot(struct btrfs_root *root,
  6276. struct btrfs_block_rsv *block_rsv, int update_ref,
  6277. int for_reloc)
  6278. {
  6279. struct btrfs_path *path;
  6280. struct btrfs_trans_handle *trans;
  6281. struct btrfs_root *tree_root = root->fs_info->tree_root;
  6282. struct btrfs_root_item *root_item = &root->root_item;
  6283. struct walk_control *wc;
  6284. struct btrfs_key key;
  6285. int err = 0;
  6286. int ret;
  6287. int level;
  6288. path = btrfs_alloc_path();
  6289. if (!path) {
  6290. err = -ENOMEM;
  6291. goto out;
  6292. }
  6293. wc = kzalloc(sizeof(*wc), GFP_NOFS);
  6294. if (!wc) {
  6295. btrfs_free_path(path);
  6296. err = -ENOMEM;
  6297. goto out;
  6298. }
  6299. trans = btrfs_start_transaction(tree_root, 0);
  6300. if (IS_ERR(trans)) {
  6301. err = PTR_ERR(trans);
  6302. goto out_free;
  6303. }
  6304. if (block_rsv)
  6305. trans->block_rsv = block_rsv;
  6306. if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
  6307. level = btrfs_header_level(root->node);
  6308. path->nodes[level] = btrfs_lock_root_node(root);
  6309. btrfs_set_lock_blocking(path->nodes[level]);
  6310. path->slots[level] = 0;
  6311. path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
  6312. memset(&wc->update_progress, 0,
  6313. sizeof(wc->update_progress));
  6314. } else {
  6315. btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
  6316. memcpy(&wc->update_progress, &key,
  6317. sizeof(wc->update_progress));
  6318. level = root_item->drop_level;
  6319. BUG_ON(level == 0);
  6320. path->lowest_level = level;
  6321. ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
  6322. path->lowest_level = 0;
  6323. if (ret < 0) {
  6324. err = ret;
  6325. goto out_end_trans;
  6326. }
  6327. WARN_ON(ret > 0);
  6328. /*
  6329. * unlock our path, this is safe because only this
  6330. * function is allowed to delete this snapshot
  6331. */
  6332. btrfs_unlock_up_safe(path, 0);
  6333. level = btrfs_header_level(root->node);
  6334. while (1) {
  6335. btrfs_tree_lock(path->nodes[level]);
  6336. btrfs_set_lock_blocking(path->nodes[level]);
  6337. ret = btrfs_lookup_extent_info(trans, root,
  6338. path->nodes[level]->start,
  6339. path->nodes[level]->len,
  6340. &wc->refs[level],
  6341. &wc->flags[level]);
  6342. if (ret < 0) {
  6343. err = ret;
  6344. goto out_end_trans;
  6345. }
  6346. BUG_ON(wc->refs[level] == 0);
  6347. if (level == root_item->drop_level)
  6348. break;
  6349. btrfs_tree_unlock(path->nodes[level]);
  6350. WARN_ON(wc->refs[level] != 1);
  6351. level--;
  6352. }
  6353. }
  6354. wc->level = level;
  6355. wc->shared_level = -1;
  6356. wc->stage = DROP_REFERENCE;
  6357. wc->update_ref = update_ref;
  6358. wc->keep_locks = 0;
  6359. wc->for_reloc = for_reloc;
  6360. wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
  6361. while (1) {
  6362. ret = walk_down_tree(trans, root, path, wc);
  6363. if (ret < 0) {
  6364. err = ret;
  6365. break;
  6366. }
  6367. ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
  6368. if (ret < 0) {
  6369. err = ret;
  6370. break;
  6371. }
  6372. if (ret > 0) {
  6373. BUG_ON(wc->stage != DROP_REFERENCE);
  6374. break;
  6375. }
  6376. if (wc->stage == DROP_REFERENCE) {
  6377. level = wc->level;
  6378. btrfs_node_key(path->nodes[level],
  6379. &root_item->drop_progress,
  6380. path->slots[level]);
  6381. root_item->drop_level = level;
  6382. }
  6383. BUG_ON(wc->level == 0);
  6384. if (btrfs_should_end_transaction(trans, tree_root)) {
  6385. ret = btrfs_update_root(trans, tree_root,
  6386. &root->root_key,
  6387. root_item);
  6388. if (ret) {
  6389. btrfs_abort_transaction(trans, tree_root, ret);
  6390. err = ret;
  6391. goto out_end_trans;
  6392. }
  6393. btrfs_end_transaction_throttle(trans, tree_root);
  6394. trans = btrfs_start_transaction(tree_root, 0);
  6395. if (IS_ERR(trans)) {
  6396. err = PTR_ERR(trans);
  6397. goto out_free;
  6398. }
  6399. if (block_rsv)
  6400. trans->block_rsv = block_rsv;
  6401. }
  6402. }
  6403. btrfs_release_path(path);
  6404. if (err)
  6405. goto out_end_trans;
  6406. ret = btrfs_del_root(trans, tree_root, &root->root_key);
  6407. if (ret) {
  6408. btrfs_abort_transaction(trans, tree_root, ret);
  6409. goto out_end_trans;
  6410. }
  6411. if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
  6412. ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
  6413. NULL, NULL);
  6414. if (ret < 0) {
  6415. btrfs_abort_transaction(trans, tree_root, ret);
  6416. err = ret;
  6417. goto out_end_trans;
  6418. } else if (ret > 0) {
  6419. /* if we fail to delete the orphan item this time
  6420. * around, it'll get picked up the next time.
  6421. *
  6422. * The most common failure here is just -ENOENT.
  6423. */
  6424. btrfs_del_orphan_item(trans, tree_root,
  6425. root->root_key.objectid);
  6426. }
  6427. }
  6428. if (root->in_radix) {
  6429. btrfs_free_fs_root(tree_root->fs_info, root);
  6430. } else {
  6431. free_extent_buffer(root->node);
  6432. free_extent_buffer(root->commit_root);
  6433. kfree(root);
  6434. }
  6435. out_end_trans:
  6436. btrfs_end_transaction_throttle(trans, tree_root);
  6437. out_free:
  6438. kfree(wc);
  6439. btrfs_free_path(path);
  6440. out:
  6441. if (err)
  6442. btrfs_std_error(root->fs_info, err);
  6443. return err;
  6444. }
  6445. /*
  6446. * drop subtree rooted at tree block 'node'.
  6447. *
6448. * NOTE: this function will unlock and release tree block 'node'.
6449. * it is only used by the relocation code.
  6450. */
  6451. int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
  6452. struct btrfs_root *root,
  6453. struct extent_buffer *node,
  6454. struct extent_buffer *parent)
  6455. {
  6456. struct btrfs_path *path;
  6457. struct walk_control *wc;
  6458. int level;
  6459. int parent_level;
  6460. int ret = 0;
  6461. int wret;
  6462. BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
  6463. path = btrfs_alloc_path();
  6464. if (!path)
  6465. return -ENOMEM;
  6466. wc = kzalloc(sizeof(*wc), GFP_NOFS);
  6467. if (!wc) {
  6468. btrfs_free_path(path);
  6469. return -ENOMEM;
  6470. }
  6471. btrfs_assert_tree_locked(parent);
  6472. parent_level = btrfs_header_level(parent);
  6473. extent_buffer_get(parent);
  6474. path->nodes[parent_level] = parent;
  6475. path->slots[parent_level] = btrfs_header_nritems(parent);
  6476. btrfs_assert_tree_locked(node);
  6477. level = btrfs_header_level(node);
  6478. path->nodes[level] = node;
  6479. path->slots[level] = 0;
  6480. path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
  6481. wc->refs[parent_level] = 1;
  6482. wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
  6483. wc->level = level;
  6484. wc->shared_level = -1;
  6485. wc->stage = DROP_REFERENCE;
  6486. wc->update_ref = 0;
  6487. wc->keep_locks = 1;
  6488. wc->for_reloc = 1;
  6489. wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
  6490. while (1) {
  6491. wret = walk_down_tree(trans, root, path, wc);
  6492. if (wret < 0) {
  6493. ret = wret;
  6494. break;
  6495. }
  6496. wret = walk_up_tree(trans, root, path, wc, parent_level);
  6497. if (wret < 0)
  6498. ret = wret;
  6499. if (wret != 0)
  6500. break;
  6501. }
  6502. kfree(wc);
  6503. btrfs_free_path(path);
  6504. return ret;
  6505. }
  6506. static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
  6507. {
  6508. u64 num_devices;
  6509. u64 stripped;
  6510. /*
6511. * if restripe for this chunk_type is on, pick the target profile and
6512. * return; otherwise do the usual balance
  6513. */
  6514. stripped = get_restripe_target(root->fs_info, flags);
  6515. if (stripped)
  6516. return extended_to_chunk(stripped);
  6517. /*
  6518. * we add in the count of missing devices because we want
  6519. * to make sure that any RAID levels on a degraded FS
  6520. * continue to be honored.
  6521. */
  6522. num_devices = root->fs_info->fs_devices->rw_devices +
  6523. root->fs_info->fs_devices->missing_devices;
  6524. stripped = BTRFS_BLOCK_GROUP_RAID0 |
  6525. BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
  6526. BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
  6527. if (num_devices == 1) {
  6528. stripped |= BTRFS_BLOCK_GROUP_DUP;
  6529. stripped = flags & ~stripped;
  6530. /* turn raid0 into single device chunks */
  6531. if (flags & BTRFS_BLOCK_GROUP_RAID0)
  6532. return stripped;
  6533. /* turn mirroring into duplication */
  6534. if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
  6535. BTRFS_BLOCK_GROUP_RAID10))
  6536. return stripped | BTRFS_BLOCK_GROUP_DUP;
  6537. } else {
  6538. /* they already had raid on here, just return */
  6539. if (flags & stripped)
  6540. return flags;
  6541. stripped |= BTRFS_BLOCK_GROUP_DUP;
  6542. stripped = flags & ~stripped;
  6543. /* switch duplicated blocks with raid1 */
  6544. if (flags & BTRFS_BLOCK_GROUP_DUP)
  6545. return stripped | BTRFS_BLOCK_GROUP_RAID1;
  6546. /* this is drive concat, leave it alone */
  6547. }
  6548. return flags;
  6549. }
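/*
 * try to mark a block group read-only: this only succeeds if the
 * space_info can absorb the group's unused bytes (plus a small cushion,
 * unless @force is set) within its total size
 */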
  6550. static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
  6551. {
  6552. struct btrfs_space_info *sinfo = cache->space_info;
  6553. u64 num_bytes;
  6554. u64 min_allocable_bytes;
  6555. int ret = -ENOSPC;
  6556. /*
  6557. * We need some metadata space and system metadata space for
6558. * allocating chunks in some corner cases, unless we are forced to
6559. * set the group readonly.
  6560. */
  6561. if ((sinfo->flags &
  6562. (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
  6563. !force)
  6564. min_allocable_bytes = 1 * 1024 * 1024;
  6565. else
  6566. min_allocable_bytes = 0;
  6567. spin_lock(&sinfo->lock);
  6568. spin_lock(&cache->lock);
  6569. if (cache->ro) {
  6570. ret = 0;
  6571. goto out;
  6572. }
  6573. num_bytes = cache->key.offset - cache->reserved - cache->pinned -
  6574. cache->bytes_super - btrfs_block_group_used(&cache->item);
  6575. if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
  6576. sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
  6577. min_allocable_bytes <= sinfo->total_bytes) {
  6578. sinfo->bytes_readonly += num_bytes;
  6579. cache->ro = 1;
  6580. ret = 0;
  6581. }
  6582. out:
  6583. spin_unlock(&cache->lock);
  6584. spin_unlock(&sinfo->lock);
  6585. return ret;
  6586. }
  6587. int btrfs_set_block_group_ro(struct btrfs_root *root,
  6588. struct btrfs_block_group_cache *cache)
  6589. {
  6590. struct btrfs_trans_handle *trans;
  6591. u64 alloc_flags;
  6592. int ret;
  6593. BUG_ON(cache->ro);
  6594. trans = btrfs_join_transaction(root);
  6595. if (IS_ERR(trans))
  6596. return PTR_ERR(trans);
  6597. alloc_flags = update_block_group_flags(root, cache->flags);
  6598. if (alloc_flags != cache->flags) {
  6599. ret = do_chunk_alloc(trans, root, alloc_flags,
  6600. CHUNK_ALLOC_FORCE);
  6601. if (ret < 0)
  6602. goto out;
  6603. }
  6604. ret = set_block_group_ro(cache, 0);
  6605. if (!ret)
  6606. goto out;
  6607. alloc_flags = get_alloc_profile(root, cache->space_info->flags);
  6608. ret = do_chunk_alloc(trans, root, alloc_flags,
  6609. CHUNK_ALLOC_FORCE);
  6610. if (ret < 0)
  6611. goto out;
  6612. ret = set_block_group_ro(cache, 0);
  6613. out:
  6614. btrfs_end_transaction(trans, root);
  6615. return ret;
  6616. }
  6617. int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
  6618. struct btrfs_root *root, u64 type)
  6619. {
  6620. u64 alloc_flags = get_alloc_profile(root, type);
  6621. return do_chunk_alloc(trans, root, alloc_flags,
  6622. CHUNK_ALLOC_FORCE);
  6623. }
  6624. /*
6625. * helper to account the unused space of all the readonly block groups in the
  6626. * list. takes mirrors into account.
  6627. */
  6628. static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
  6629. {
  6630. struct btrfs_block_group_cache *block_group;
  6631. u64 free_bytes = 0;
  6632. int factor;
  6633. list_for_each_entry(block_group, groups_list, list) {
  6634. spin_lock(&block_group->lock);
  6635. if (!block_group->ro) {
  6636. spin_unlock(&block_group->lock);
  6637. continue;
  6638. }
  6639. if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
  6640. BTRFS_BLOCK_GROUP_RAID10 |
  6641. BTRFS_BLOCK_GROUP_DUP))
  6642. factor = 2;
  6643. else
  6644. factor = 1;
  6645. free_bytes += (block_group->key.offset -
  6646. btrfs_block_group_used(&block_group->item)) *
  6647. factor;
  6648. spin_unlock(&block_group->lock);
  6649. }
  6650. return free_bytes;
  6651. }
  6652. /*
6653. * helper to account the unused space of all the readonly block groups in the
  6654. * space_info. takes mirrors into account.
  6655. */
  6656. u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
  6657. {
  6658. int i;
  6659. u64 free_bytes = 0;
  6660. spin_lock(&sinfo->lock);
6661. for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
  6662. if (!list_empty(&sinfo->block_groups[i]))
  6663. free_bytes += __btrfs_get_ro_block_group_free_space(
  6664. &sinfo->block_groups[i]);
  6665. spin_unlock(&sinfo->lock);
  6666. return free_bytes;
  6667. }
  6668. void btrfs_set_block_group_rw(struct btrfs_root *root,
  6669. struct btrfs_block_group_cache *cache)
  6670. {
  6671. struct btrfs_space_info *sinfo = cache->space_info;
  6672. u64 num_bytes;
  6673. BUG_ON(!cache->ro);
  6674. spin_lock(&sinfo->lock);
  6675. spin_lock(&cache->lock);
  6676. num_bytes = cache->key.offset - cache->reserved - cache->pinned -
  6677. cache->bytes_super - btrfs_block_group_used(&cache->item);
  6678. sinfo->bytes_readonly -= num_bytes;
  6679. cache->ro = 0;
  6680. spin_unlock(&cache->lock);
  6681. spin_unlock(&sinfo->lock);
  6682. }
  6683. /*
6684. * checks to see if it's even possible to relocate this block group.
6685. *
6686. * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
6687. * ok to go ahead and try.
  6688. */
  6689. int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
  6690. {
  6691. struct btrfs_block_group_cache *block_group;
  6692. struct btrfs_space_info *space_info;
  6693. struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
  6694. struct btrfs_device *device;
  6695. u64 min_free;
  6696. u64 dev_min = 1;
  6697. u64 dev_nr = 0;
  6698. u64 target;
  6699. int index;
  6700. int full = 0;
  6701. int ret = 0;
  6702. block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
  6703. /* odd, couldn't find the block group, leave it alone */
  6704. if (!block_group)
  6705. return -1;
  6706. min_free = btrfs_block_group_used(&block_group->item);
  6707. /* no bytes used, we're good */
  6708. if (!min_free)
  6709. goto out;
  6710. space_info = block_group->space_info;
  6711. spin_lock(&space_info->lock);
  6712. full = space_info->full;
  6713. /*
  6714. * if this is the last block group we have in this space, we can't
  6715. * relocate it unless we're able to allocate a new chunk below.
  6716. *
  6717. * Otherwise, we need to make sure we have room in the space to handle
  6718. * all of the extents from this block group. If we can, we're good
  6719. */
  6720. if ((space_info->total_bytes != block_group->key.offset) &&
  6721. (space_info->bytes_used + space_info->bytes_reserved +
  6722. space_info->bytes_pinned + space_info->bytes_readonly +
  6723. min_free < space_info->total_bytes)) {
  6724. spin_unlock(&space_info->lock);
  6725. goto out;
  6726. }
  6727. spin_unlock(&space_info->lock);
  6728. /*
  6729. * ok we don't have enough space, but maybe we have free space on our
  6730. * devices to allocate new chunks for relocation, so loop through our
  6731. * alloc devices and guess if we have enough space. if this block
  6732. * group is going to be restriped, run checks against the target
  6733. * profile instead of the current one.
  6734. */
  6735. ret = -1;
  6736. /*
  6737. * index:
  6738. * 0: raid10
  6739. * 1: raid1
  6740. * 2: dup
  6741. * 3: raid0
  6742. * 4: single
  6743. */
  6744. target = get_restripe_target(root->fs_info, block_group->flags);
  6745. if (target) {
  6746. index = __get_raid_index(extended_to_chunk(target));
  6747. } else {
  6748. /*
  6749. * this is just a balance, so if we were marked as full
  6750. * we know there is no space for a new chunk
  6751. */
  6752. if (full)
  6753. goto out;
  6754. index = get_block_group_index(block_group);
  6755. }
  6756. if (index == BTRFS_RAID_RAID10) {
  6757. dev_min = 4;
  6758. /* Divide by 2 */
  6759. min_free >>= 1;
  6760. } else if (index == BTRFS_RAID_RAID1) {
  6761. dev_min = 2;
  6762. } else if (index == BTRFS_RAID_DUP) {
  6763. /* Multiply by 2 */
  6764. min_free <<= 1;
  6765. } else if (index == BTRFS_RAID_RAID0) {
  6766. dev_min = fs_devices->rw_devices;
  6767. do_div(min_free, dev_min);
  6768. }
  6769. mutex_lock(&root->fs_info->chunk_mutex);
  6770. list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
  6771. u64 dev_offset;
  6772. /*
  6773. * check to make sure we can actually find a chunk with enough
  6774. * space to fit our block group in.
  6775. */
  6776. if (device->total_bytes > device->bytes_used + min_free &&
  6777. !device->is_tgtdev_for_dev_replace) {
  6778. ret = find_free_dev_extent(device, min_free,
  6779. &dev_offset, NULL);
  6780. if (!ret)
  6781. dev_nr++;
  6782. if (dev_nr >= dev_min)
  6783. break;
  6784. ret = -1;
  6785. }
  6786. }
  6787. mutex_unlock(&root->fs_info->chunk_mutex);
  6788. out:
  6789. btrfs_put_block_group(block_group);
  6790. return ret;
  6791. }
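/*
 * find the first block group item in the extent tree with objectid >=
 * key->objectid
 */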
  6792. static int find_first_block_group(struct btrfs_root *root,
  6793. struct btrfs_path *path, struct btrfs_key *key)
  6794. {
  6795. int ret = 0;
  6796. struct btrfs_key found_key;
  6797. struct extent_buffer *leaf;
  6798. int slot;
  6799. ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
  6800. if (ret < 0)
  6801. goto out;
  6802. while (1) {
  6803. slot = path->slots[0];
  6804. leaf = path->nodes[0];
  6805. if (slot >= btrfs_header_nritems(leaf)) {
  6806. ret = btrfs_next_leaf(root, path);
  6807. if (ret == 0)
  6808. continue;
  6809. if (ret < 0)
  6810. goto out;
  6811. break;
  6812. }
  6813. btrfs_item_key_to_cpu(leaf, &found_key, slot);
  6814. if (found_key.objectid >= key->objectid &&
  6815. found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
  6816. ret = 0;
  6817. goto out;
  6818. }
  6819. path->slots[0]++;
  6820. }
  6821. out:
  6822. return ret;
  6823. }
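/*
 * walk all block groups and drop the reference each one holds on its
 * free space cache inode
 */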
  6824. void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
  6825. {
  6826. struct btrfs_block_group_cache *block_group;
  6827. u64 last = 0;
  6828. while (1) {
  6829. struct inode *inode;
  6830. block_group = btrfs_lookup_first_block_group(info, last);
  6831. while (block_group) {
  6832. spin_lock(&block_group->lock);
  6833. if (block_group->iref)
  6834. break;
  6835. spin_unlock(&block_group->lock);
  6836. block_group = next_block_group(info->tree_root,
  6837. block_group);
  6838. }
  6839. if (!block_group) {
  6840. if (last == 0)
  6841. break;
  6842. last = 0;
  6843. continue;
  6844. }
  6845. inode = block_group->inode;
  6846. block_group->iref = 0;
  6847. block_group->inode = NULL;
  6848. spin_unlock(&block_group->lock);
  6849. iput(inode);
  6850. last = block_group->key.objectid + block_group->key.offset;
  6851. btrfs_put_block_group(block_group);
  6852. }
  6853. }
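/*
 * tear down all cached block groups and space_infos during the final
 * stages of unmount
 */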
  6854. int btrfs_free_block_groups(struct btrfs_fs_info *info)
  6855. {
  6856. struct btrfs_block_group_cache *block_group;
  6857. struct btrfs_space_info *space_info;
  6858. struct btrfs_caching_control *caching_ctl;
  6859. struct rb_node *n;
  6860. down_write(&info->extent_commit_sem);
  6861. while (!list_empty(&info->caching_block_groups)) {
  6862. caching_ctl = list_entry(info->caching_block_groups.next,
  6863. struct btrfs_caching_control, list);
  6864. list_del(&caching_ctl->list);
  6865. put_caching_control(caching_ctl);
  6866. }
  6867. up_write(&info->extent_commit_sem);
  6868. spin_lock(&info->block_group_cache_lock);
  6869. while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
  6870. block_group = rb_entry(n, struct btrfs_block_group_cache,
  6871. cache_node);
  6872. rb_erase(&block_group->cache_node,
  6873. &info->block_group_cache_tree);
  6874. spin_unlock(&info->block_group_cache_lock);
  6875. down_write(&block_group->space_info->groups_sem);
  6876. list_del(&block_group->list);
  6877. up_write(&block_group->space_info->groups_sem);
  6878. if (block_group->cached == BTRFS_CACHE_STARTED)
  6879. wait_block_group_cache_done(block_group);
  6880. /*
  6881. * We haven't cached this block group, which means we could
  6882. * possibly have excluded extents on this block group.
  6883. */
  6884. if (block_group->cached == BTRFS_CACHE_NO)
  6885. free_excluded_extents(info->extent_root, block_group);
  6886. btrfs_remove_free_space_cache(block_group);
  6887. btrfs_put_block_group(block_group);
  6888. spin_lock(&info->block_group_cache_lock);
  6889. }
  6890. spin_unlock(&info->block_group_cache_lock);
  6891. /* now that all the block groups are freed, go through and
  6892. * free all the space_info structs. This is only called during
  6893. * the final stages of unmount, and so we know nobody is
  6894. * using them. We call synchronize_rcu() once before we start,
  6895. * just to be on the safe side.
  6896. */
  6897. synchronize_rcu();
  6898. release_global_block_rsv(info);
6899. while (!list_empty(&info->space_info)) {
  6900. space_info = list_entry(info->space_info.next,
  6901. struct btrfs_space_info,
  6902. list);
  6903. if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
  6904. if (space_info->bytes_pinned > 0 ||
  6905. space_info->bytes_reserved > 0 ||
  6906. space_info->bytes_may_use > 0) {
  6907. WARN_ON(1);
  6908. dump_space_info(space_info, 0, 0);
  6909. }
  6910. }
  6911. list_del(&space_info->list);
  6912. kfree(space_info);
  6913. }
  6914. return 0;
  6915. }
  6916. static void __link_block_group(struct btrfs_space_info *space_info,
  6917. struct btrfs_block_group_cache *cache)
  6918. {
  6919. int index = get_block_group_index(cache);
  6920. down_write(&space_info->groups_sem);
  6921. list_add_tail(&cache->list, &space_info->block_groups[index]);
  6922. up_write(&space_info->groups_sem);
  6923. }
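/*
 * read every block group item from the extent tree and set up the
 * in-memory cache and space_info accounting for each one
 */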
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int need_clear = 0;
	u64 cache_gen;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
	if (btrfs_test_opt(root, SPACE_CACHE) &&
	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(root, CLEAR_CACHE))
		need_clear = 1;

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}
		cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
						GFP_NOFS);
		if (!cache->free_space_ctl) {
			kfree(cache);
			ret = -ENOMEM;
			goto error;
		}

		atomic_set(&cache->count, 1);
		spin_lock_init(&cache->lock);
		cache->fs_info = info;
		INIT_LIST_HEAD(&cache->list);
		INIT_LIST_HEAD(&cache->cluster_list);

		if (need_clear) {
			/*
			 * When we mount with an old space cache, we need to
			 * set BTRFS_DC_CLEAR and set the dirty flag.
			 *
			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
			 *    truncate the old free space cache inode and
			 *    set up a new one.
			 * b) Setting the 'dirty' flag makes sure that we
			 *    flush the new space cache info onto disk.
			 */
			cache->disk_cache_state = BTRFS_DC_CLEAR;
			if (btrfs_test_opt(root, SPACE_CACHE))
				cache->dirty = 1;
		}

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(path);
		cache->flags = btrfs_block_group_flags(&cache->item);
		cache->sectorsize = root->sectorsize;
		cache->full_stripe_len = btrfs_full_stripe_len(root,
					       &root->fs_info->mapping_tree,
					       found_key.objectid);
		btrfs_init_free_space_ctl(cache);

		/*
		 * We need to exclude the super stripes now so that the space
		 * info has super bytes accounted for, otherwise we'll think
		 * we have more space than we actually do.
		 */
		exclude_super_stripes(root, cache);

		/*
		 * Check for two cases: either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us a lot of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			free_excluded_extents(root, cache);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
			free_excluded_extents(root, cache);
		}

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		BUG_ON(ret); /* -ENOMEM */
		cache->space_info = space_info;
		spin_lock(&cache->space_info->lock);
		cache->space_info->bytes_readonly += cache->bytes_super;
		spin_unlock(&cache->space_info->lock);

		__link_block_group(space_info, cache);

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		BUG_ON(ret); /* Logic error */

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_ro(cache, 1);
	}

	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
		if (!(get_alloc_profile(root, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID5 |
		       BTRFS_BLOCK_GROUP_RAID6 |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * Avoid allocating from un-mirrored block groups if there
		 * are mirrored block groups.
		 */
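		/*
		 * Note: indices 3 and 4 are the RAID0 and SINGLE lists of
		 * enum btrfs_raid_types, i.e. the un-mirrored profiles.
		 */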
		list_for_each_entry(cache, &space_info->block_groups[3], list)
			set_block_group_ro(cache, 1);
		list_for_each_entry(cache, &space_info->block_groups[4], list)
			set_block_group_ro(cache, 1);
	}

	init_global_block_rsv(info);
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
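
/*
 * Insert the block group items queued on trans->new_bgs into the extent
 * tree when the transaction is ended or committed.  A failed insert
 * aborts the transaction, but the list is always fully drained.
 */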
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	struct btrfs_block_group_cache *block_group, *tmp;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_block_group_item item;
	struct btrfs_key key;
	int ret = 0;

	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
				 new_bg_list) {
		list_del_init(&block_group->new_bg_list);

		if (ret)
			continue;

		spin_lock(&block_group->lock);
		memcpy(&item, &block_group->item, sizeof(item));
		memcpy(&key, &block_group->key, sizeof(key));
		spin_unlock(&block_group->lock);

		ret = btrfs_insert_item(trans, extent_root, &key, &item,
					sizeof(item));
		if (ret)
			btrfs_abort_transaction(trans, extent_root, ret);
	}
}
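
/*
 * Create the in-memory cache entry for a freshly allocated chunk.  The
 * new group starts out fully cached (its free space is known exactly)
 * and is queued on trans->new_bgs so the on-disk block group item is
 * inserted by btrfs_create_pending_block_groups().
 */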
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_log_full_commit = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return -ENOMEM;
	}

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->sectorsize = root->sectorsize;
	cache->fs_info = root->fs_info;
	cache->full_stripe_len = btrfs_full_stripe_len(root,
					       &root->fs_info->mapping_tree,
					       chunk_offset);

	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->new_bg_list);

	btrfs_init_free_space_ctl(cache);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	exclude_super_stripes(root, cache);

	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	free_excluded_extents(root, cache);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret); /* -ENOMEM */
	update_global_block_rsv(root->fs_info);

	spin_lock(&cache->space_info->lock);
	cache->space_info->bytes_readonly += cache->bytes_super;
	spin_unlock(&cache->space_info->lock);

	__link_block_group(cache->space_info, cache);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	BUG_ON(ret); /* Logic error */

	list_add_tail(&cache->new_bg_list, &trans->new_bgs);

	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}
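
/*
 * Clear the extended profile bits from the avail_*_alloc_bits masks;
 * used when the last block group of a given type goes away.
 */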
static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}
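
/*
 * Remove an (empty, read-only) block group: detach it from any
 * allocation clusters and from its space_info, drop the free space
 * cache inode, delete the free space item and the block group item,
 * and adjust the space accounting.
 */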
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	int ret;
	int index;
	int factor;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	free_excluded_extents(root, block_group);

	memcpy(&key, &block_group->key, sizeof(key));
	index = get_block_group_index(block_group);
	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
				  BTRFS_BLOCK_GROUP_RAID1 |
				  BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	inode = lookup_free_space_inode(tree_root, block_group, path);
	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, inode);
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block group's ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);

	if (root->fs_info->first_logical_byte == block_group->key.objectid)
		root->fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index]))
		clear_avail_alloc_bits(root->fs_info, block_group->flags);
	up_write(&block_group->space_info->groups_sem);

	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	block_group->space_info->disk_total -= block_group->key.offset * factor;
	spin_unlock(&block_group->space_info->lock);

	memcpy(&key, &block_group->key, sizeof(key));

	btrfs_clear_space_info_full(root->fs_info);
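
	/*
	 * Drop two references: one taken by the btrfs_lookup_block_group()
	 * call above, and the one held by the block group cache rbtree
	 * that rb_erase() unlinked earlier.
	 */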
	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}
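
/*
 * Seed the space_info structures at mount time, before any block groups
 * are read: SYSTEM always, plus either a single mixed METADATA|DATA
 * entry or separate METADATA and DATA entries, depending on the
 * MIXED_GROUPS incompat feature.
 */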
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return 1;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	}
out:
	return ret;
}
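
/*
 * Thin wrappers around unpin_extent_range() and btrfs_discard_extent()
 * for use by error/cleanup paths outside this file.
 */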
int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	return unpin_extent_range(root, start, end);
}

int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
			       u64 num_bytes, u64 *actual_bytes)
{
	return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
}
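
/*
 * Handle FITRIM: walk every block group overlapping the requested
 * range, make sure its free space is cached, and discard the free
 * extents.  On return, range->len holds the total bytes trimmed.
 */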
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	u64 group_trimmed;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret = 0;

	/*
	 * When trimming the whole FS, start from the first block group,
	 * since block groups may begin at a non-zero offset.
	 */
	if (range->len == total_bytes)
		cache = btrfs_lookup_first_block_group(fs_info, range->start);
	else
		cache = btrfs_lookup_block_group(fs_info, range->start);

	while (cache) {
		if (cache->key.objectid >= (range->start + range->len)) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->key.objectid);
		end = min(range->start + range->len,
			  cache->key.objectid + cache->key.offset);

		if (end - start >= range->minlen) {
			if (!block_group_cache_done(cache)) {
				ret = cache_block_group(cache, 0);
				if (!ret)
					wait_block_group_cache_done(cache);
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				btrfs_put_block_group(cache);
				break;
			}
		}

		cache = next_block_group(fs_info->tree_root, cache);
	}

	range->len = trimmed;
	return ret;
}