extent-tree.c 191 KB

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"
/*
 * control flags for do_chunk_alloc's force field
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_FORCE = 1,
        CHUNK_ALLOC_LIMITED = 2,
};
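
/*
 * Illustrative only (values hypothetical): a caller that must end up with
 * a fresh chunk regardless of current usage would pass CHUNK_ALLOC_FORCE:
 *
 *      do_chunk_alloc(trans, extent_root, 2 * 1024 * 1024,
 *                     BTRFS_BLOCK_GROUP_METADATA, CHUNK_ALLOC_FORCE);
 */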

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 root_objectid, u64 owner_objectid,
                               u64 owner_offset, int refs_to_drop,
                               struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                WARN_ON(cache->reserved_pinned > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}
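
/*
 * Note: every btrfs_get_block_group() must be paired with a
 * btrfs_put_block_group(); the final put is what frees the group and
 * its free_space_ctl above.
 */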

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                btrfs_get_block_group(ret);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                BUG_ON(ret);
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret);

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        BUG_ON(ret);
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, because their free space will not be released until the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret);
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }

        return total_added;
}
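
/*
 * Worked example (hypothetical offsets): with start == 0, end == 100 and a
 * pinned extent covering [40, 60], the loop above adds [0, 40) as free
 * space, skips the pinned range, and the tail check adds [61, 100), so
 * total_added comes back as 79 bytes.
 */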

static int caching_kthread(void *data)
{
        struct btrfs_block_group_cache *block_group = data;
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
        struct btrfs_root *extent_root = fs_info->extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 2;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                smp_mb();
                if (fs_info->closing > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        caching_ctl->progress = last;
                        btrfs_release_path(path);
                        up_read(&fs_info->extent_commit_sem);
                        mutex_unlock(&caching_ctl->mutex);
                        if (btrfs_transaction_in_commit(fs_info))
                                schedule_timeout(1);
                        else
                                cond_resched();
                        goto again;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        atomic_dec(&block_group->space_info->caching_threads);
        btrfs_put_block_group(block_group);

        return 0;
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             int load_cache_only)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct task_struct *tsk;
        int ret = 0;

        smp_mb();
        if (cache->cached != BTRFS_CACHE_NO)
                return 0;

        /*
         * We can't do the read from on-disk cache during a commit since we need
         * to have the normal tree locking.  Also if we are currently trying to
         * allocate blocks for the tree root we can't do the fast caching since
         * we likely hold important locks.
         */
        if (trans && (!trans->transaction->in_commit) &&
            (root && root != root->fs_info->tree_root)) {
                spin_lock(&cache->lock);
                if (cache->cached != BTRFS_CACHE_NO) {
                        spin_unlock(&cache->lock);
                        return 0;
                }
                cache->cached = BTRFS_CACHE_STARTED;
                spin_unlock(&cache->lock);

                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        cache->cached = BTRFS_CACHE_NO;
                }
                spin_unlock(&cache->lock);
                if (ret == 1) {
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        }

        if (load_cache_only)
                return 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        BUG_ON(!caching_ctl);

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        /* one for caching kthread, one for caching block group list */
        atomic_set(&caching_ctl->count, 2);

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_STARTED;
        spin_unlock(&cache->lock);

        down_write(&fs_info->extent_commit_sem);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        atomic_inc(&cache->space_info->caching_threads);
        btrfs_get_block_group(cache);

        tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
                          cache->key.objectid);
        if (IS_ERR(tsk)) {
                ret = PTR_ERR(tsk);
                printk(KERN_ERR "error running thread %d\n", ret);
                BUG();
        }

        return ret;
}
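
/*
 * Usage sketch: pass load_cache_only == 1 to attempt only the fast
 * load_free_space_cache() path; pass 0 to also fall through and start
 * the caching kthread when the fast path can't be used.
 */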

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
                 BTRFS_BLOCK_GROUP_METADATA;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
        if (factor == 100)
                return num;
        num *= factor;
        do_div(num, 100);
        return num;
}
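
/*
 * Both helpers compute a fixed-point fraction of num, e.g. (illustrative):
 *
 *      div_factor(1000, 9)       == 900   (nine tenths)
 *      div_factor_fine(1000, 85) == 850   (85 hundredths)
 */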

u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        BUG_ON(!path);
        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}
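
/*
 * A return of 0 means an extent item keyed exactly (start,
 * BTRFS_EXTENT_ITEM_KEY, len) exists; ret > 0 means no such item was
 * found, and ret < 0 is an error from the tree search.
 */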

/*
 * helper function to lookup reference count and flags of an extent.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  the head
 * node may also store the extent flags to set.  This way you can check
 * to see what the reference count and extent flags would be once all of
 * the delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;
        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  The implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  The full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually the full back refs are generic, and
 * can be used in all cases where the implicit back refs are used.  The
 * major shortcoming of the full back refs is their overhead.  Every time a
 * tree block gets COWed, we have to update the back refs entry for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is the
 * objectid of the block's owner tree.  The key offset for the full back
 * refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in the
 * tree block info structure.
 */
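
/*
 * Key layout examples for the rules above (values illustrative):
 *
 *   implicit back ref for a file extent:
 *      (extent bytenr, BTRFS_EXTENT_DATA_REF_KEY,
 *       hash of (root objectid, inode objectid, file offset))
 *
 *   implicit back ref for a tree block:
 *      (extent bytenr, BTRFS_TREE_BLOCK_REF_KEY, owner root objectid)
 *
 *   full back ref for a tree block:
 *      (extent bytenr, BTRFS_SHARED_BLOCK_REF_KEY, parent block bytenr)
 */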
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif
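
/*
 * Hash (root_objectid, owner, offset) into the key offset used for
 * implicit data back refs. The root objectid goes into one crc32c,
 * the owner and offset into another, and the two are combined below.
 * Note the high crc is shifted by 31 (not 32) bits; the result is
 * stored on disk as a key offset, so the scheme cannot be changed
 * without breaking existing filesystems.
 */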
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
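
/*
 * Insert a keyed data back ref item. For implicit refs the key offset
 * is a hash, so different (root, owner, offset) tuples can collide;
 * the -EEXIST loop below walks forward through colliding offsets until
 * it finds either a matching ref to bump or a free key to insert at.
 */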
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
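
/*
 * Return the ref count recorded in a data back ref. If @iref is set
 * the ref is inline inside the extent item; otherwise it is a keyed
 * ref item and the type is taken from the item key.
 */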
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}
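
/*
 * Pick the back ref key type for an extent. An owner below
 * BTRFS_FIRST_FREE_OBJECTID identifies a tree block, and a non-zero
 * parent identifies a full back ref:
 *
 *   tree block, full backref     -> BTRFS_SHARED_BLOCK_REF_KEY
 *   tree block, implicit backref -> BTRFS_TREE_BLOCK_REF_KEY
 *   data extent, full backref    -> BTRFS_SHARED_DATA_REF_KEY
 *   data extent, implicit        -> BTRFS_EXTENT_DATA_REF_KEY
 */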
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * Look for an inline back ref. If the back ref is found, *ref_ret is
 * set to the address of the inline back ref and 0 is returned.
 *
 * If the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * If insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 * items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block.
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
/*
 * helper to add a new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);
	BUG_ON(ret);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
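
/*
 * Look up a back ref for an extent. Inline back refs are tried first;
 * if none is found the path is released and the search falls back to
 * the corresponding keyed back ref item, with *ref_ret left NULL.
 */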
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}
/*
 * helper to update/remove an inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
		BUG_ON(ret);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}
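
/*
 * blkdev_issue_discard() takes its start and length in 512-byte
 * sectors, hence the shift by 9 to convert from bytes.
 */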
static int btrfs_issue_discard(struct block_device *bdev,
			       u64 start, u64 len)
{
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_multi_bio *multi = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
			      bytenr, &num_bytes, &multi, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = multi->stripes;
		int i;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length);
			if (!ret)
				discarded_bytes += stripe->length;
			else if (ret != -EOPNOTSUPP)
				break;
		}
		kfree(multi);
	}
	if (discarded_bytes && ret == -EOPNOTSUPP)
		ret = 0;

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	return ret;
}
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
						 parent, root_objectid,
						 (int)owner,
						 BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
						 parent, root_objectid,
						 owner, offset,
						 BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);

	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}
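
/*
 * Apply a pending extent op (flags and/or key update) to the on-disk
 * extent item: locate the item in the extent tree, convert it from the
 * v0 format if necessary, then run __run_delayed_extent_op() on it.
 */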
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = node->num_bytes;

	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_tree_ref(node);
	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags ||
		       !extent_op->update_key);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret;

	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree. But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
				BUG_ON(ret);
			}
		}
		mutex_unlock(&head->mutex);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}
static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents the ref count from going down to zero when
	 * there still are pending delayed refs.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}
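
/*
 * Run all of the delayed refs on the heads in @cluster. For each head
 * the individual add/drop refs are processed first (adds before drops,
 * via select_delayed_ref()); the head node itself is run last so that
 * any pending extent op or accounting fixup happens after the refs.
 * Returns the number of refs processed.
 */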
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head. If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);
		if (!ref) {
			/* All delayed refs have been processed. Go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				kfree(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				BUG_ON(ret);
				kfree(extent_op);

				cond_resched();
				spin_lock(&delayed_refs->lock);
				continue;
			}

			list_del_init(&locked_ref->cluster);
			locked_ref = NULL;
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;

		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);
		BUG_ON(ret);

		btrfs_put_delayed_ref(ref);
		kfree(extent_op);
		count++;

		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	return count;
}
/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far. count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int ret;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);
again:
	spin_lock(&delayed_refs->lock);
	if (count == 0) {
		count = delayed_refs->num_entries * 2;
		run_most = 1;
	}
	while (1) {
		if (!(run_all || run_most) &&
		    delayed_refs->num_heads_ready < 64)
			break;

		/*
		 * go find something we can process in the rbtree. We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		ret = run_clustered_refs(trans, root, &cluster);
		BUG_ON(ret < 0);

		count -= min_t(unsigned long, ret, count);

		if (count == 0)
			break;
	}

	if (run_all) {
		node = rb_first(&delayed_refs->root);
		if (!node)
			goto out;
		count = (unsigned long)-1;

		while (node) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;

				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);
		goto again;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
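
/*
 * A note on @count: passing 0 drains roughly the entries that were
 * queued when the call started, while passing (unsigned long)-1 sets
 * run_all and loops until the whole delayed ref tree, including newly
 * added entries, is empty. Callers that must fully drain the tree
 * (e.g. transaction commit) pass the latter.
 */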
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;

	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
	if (ret)
		kfree(extent_op);
	return ret;
}
static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	int ret = 0;

	ret = -ENOENT;
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	if (!mutex_trylock(&head->mutex)) {
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(path);

		/*
		 * Mutex was contended, block until it's released and let
		 * caller try again
		 */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out_unlock;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	if (ref->bytenr != bytenr)
		goto out_unlock;

	ret = 1;
	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out_unlock;

	data_ref = btrfs_delayed_node_to_data_ref(ref);

	node = rb_prev(node);
	if (node) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (ref->bytenr == bytenr)
			goto out_unlock;
	}

	if (data_ref->root != root->root_key.objectid ||
	    data_ref->objectid != objectid || data_ref->offset != offset)
		goto out_unlock;

	ret = 0;
out_unlock:
	mutex_unlock(&head->mutex);
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}
static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_path *path;
	int ret;
	int ret2;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOENT;

	do {
		ret = check_committed_ref(trans, root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret2 = check_delayed_ref(trans, root, path, objectid,
					 offset, bytenr);
	} while (ret2 == -EAGAIN);

	if (ret2 && ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	if (ret != -ENOENT || ret2 != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		WARN_ON(ret > 0);
	return ret;
}
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc)
{
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64);

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = btrfs_level_size(root, level - 1);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	BUG();
	return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}
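
/*
 * Write the in-memory block group item for @cache back to its slot in
 * the extent tree.
 */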
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);
fail:
	if (ret)
		return ret;
	return 0;
}
static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
		 struct btrfs_block_group_cache *cache)
{
	struct rb_node *node;

	spin_lock(&root->fs_info->block_group_cache_lock);
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&root->fs_info->block_group_cache_lock);
	return cache;
}
static int cache_save_setup(struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path)
{
	struct btrfs_root *root = block_group->fs_info->tree_root;
	struct inode *inode = NULL;
	u64 alloc_hint = 0;
	int dcs = BTRFS_DC_ERROR;
	int num_pages = 0;
	int retries = 0;
	int ret = 0;

	/*
	 * If this block group is smaller than 100 megs, don't bother caching
	 * the block group.
	 */
	if (block_group->key.offset < (100 * 1024 * 1024)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}

again:
	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		btrfs_release_path(path);
		goto out;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retries);
		retries++;

		if (block_group->ro)
			goto out_free;

		ret = create_free_space_inode(root, trans, block_group, path);
		if (ret)
			goto out_free;
		goto again;
	}

	/*
	 * We want to set the generation to 0, that way if anything goes wrong
	 * from here on out we know not to trust this cache when we load up
	 * next time.
	 */
	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	WARN_ON(ret);

	if (i_size_read(inode) > 0) {
		ret = btrfs_truncate_free_space_cache(root, trans, path,
						      inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED) {
		/* We're not cached, don't bother trying to write stuff out */
		dcs = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);

	num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
	if (!num_pages)
		num_pages = 1;

	/*
	 * Just to make absolutely sure we have enough space, we're going to
	 * preallocate 16 pages worth of space for each gigabyte of the block
	 * group. In practice we ought to use at most 8, but we need extra
	 * space so we can add our header and have a terminator between the
	 * extents and the bitmaps.
	 */
	num_pages *= 16;
	num_pages *= PAGE_CACHE_SIZE;
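
	/*
	 * Worked example (illustrative): for a 1GB block group the
	 * division above yields num_pages = 1, so we preallocate 16
	 * pages; with 4K pages that is 64K of cache space. A 10GB
	 * group gets 160 pages (640K). Page size varies by
	 * architecture, so the byte totals here assume 4K pages.
	 */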
  2438. ret = btrfs_check_data_free_space(inode, num_pages);
  2439. if (ret)
  2440. goto out_put;
  2441. ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
  2442. num_pages, num_pages,
  2443. &alloc_hint);
  2444. if (!ret)
  2445. dcs = BTRFS_DC_SETUP;
  2446. btrfs_free_reserved_data_space(inode, num_pages);
  2447. out_put:
  2448. iput(inode);
  2449. out_free:
  2450. btrfs_release_path(path);
  2451. out:
  2452. spin_lock(&block_group->lock);
  2453. block_group->disk_cache_state = dcs;
  2454. spin_unlock(&block_group->lock);
  2455. return ret;
  2456. }
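/*
 * Write every dirty block group item back into the extent tree and walk
 * the free space caches through their SETUP -> NEED_WRITE -> WRITTEN
 * states, restarting from scratch whenever a group goes dirty again
 * underneath us.
 */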
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;
	int err = 0;
	struct btrfs_path *path;
	u64 last = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}
		err = cache_save_setup(cache, trans, path);
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}

	while (1) {
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			BUG_ON(err);
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
				btrfs_put_block_group(cache);
				goto again;
			}

			if (cache->dirty)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		if (cache->disk_cache_state == BTRFS_DC_SETUP)
			cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
		cache->dirty = 0;
		last = cache->key.objectid + cache->key.offset;

		err = write_one_cache_group(trans, root, path, cache);
		BUG_ON(err);
		btrfs_put_block_group(cache);
	}

	while (1) {
		/*
		 * I don't think this is needed since we're just marking our
		 * preallocated extent as written, but just in case it can't
		 * hurt.
		 */
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			BUG_ON(err);
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			/*
			 * Really this shouldn't happen, but it could if we
			 * couldn't write the entire preallocated extent and
			 * splitting the extent resulted in a new block.
			 */
			if (cache->dirty) {
				btrfs_put_block_group(cache);
				goto again;
			}
			if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		btrfs_write_out_cache(root, trans, cache, path);

		/*
		 * If we didn't have an error then the cache state is still
		 * NEED_WRITE, so we can set it to WRITTEN.
		 */
		if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
			cache->disk_cache_state = BTRFS_DC_WRITTEN;
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}

	btrfs_free_path(path);
	return 0;
}

int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}

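/*
 * Account @total_bytes/@bytes_used to the space_info matching @flags,
 * allocating and initializing a new space_info if this is the first
 * block group with that profile.  DUP/RAID1/RAID10 consume two bytes of
 * raw disk per byte stored, hence the factor of 2.
 */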
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int i;
	int factor;

	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		     BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	found = __find_space_info(info, flags);
	if (found) {
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->disk_total += total_bytes * factor;
		found->bytes_used += bytes_used;
		found->disk_used += bytes_used * factor;
		found->full = 0;
		spin_unlock(&found->lock);
		*space_info = found;
		return 0;
	}
	found = kzalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&found->block_groups[i]);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
				BTRFS_BLOCK_GROUP_SYSTEM |
				BTRFS_BLOCK_GROUP_METADATA);
	found->total_bytes = total_bytes;
	found->disk_total = total_bytes * factor;
	found->bytes_used = bytes_used;
	found->disk_used = bytes_used * factor;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = 0;
	found->bytes_may_use = 0;
	found->full = 0;
	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
	found->chunk_alloc = 0;
	*space_info = found;
	list_add_rcu(&found->list, &info->space_info);
	atomic_set(&found->caching_threads, 0);
	return 0;
}

static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_DUP);
	if (extra_flags) {
		if (flags & BTRFS_BLOCK_GROUP_DATA)
			fs_info->avail_data_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_METADATA)
			fs_info->avail_metadata_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			fs_info->avail_system_alloc_bits |= extra_flags;
	}
}

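/*
 * Clear any RAID flags the current device count cannot satisfy, and if
 * several redundancy profiles are set at once keep only the most
 * redundant of them.
 */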
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	/*
	 * we add in the count of missing devices because we want
	 * to make sure that any RAID levels on a degraded FS
	 * continue to be honored.
	 */
	u64 num_devices = root->fs_info->fs_devices->rw_devices +
		root->fs_info->fs_devices->missing_devices;

	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
		      BTRFS_BLOCK_GROUP_RAID10))) {
		flags &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
	     (flags & BTRFS_BLOCK_GROUP_DUP)))
		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
	return flags;
}

static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		flags |= root->fs_info->avail_data_alloc_bits &
			 root->fs_info->data_alloc_profile;
	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		flags |= root->fs_info->avail_system_alloc_bits &
			 root->fs_info->system_alloc_profile;
	else if (flags & BTRFS_BLOCK_GROUP_METADATA)
		flags |= root->fs_info->avail_metadata_alloc_bits &
			 root->fs_info->metadata_alloc_profile;
	return btrfs_reduce_alloc_profile(root, flags);
}

u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
	u64 flags;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == root->fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	return get_alloc_profile(root, flags);
}

void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
{
	BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
						       BTRFS_BLOCK_GROUP_DATA);
}

/*
 * This will check the space that the inode allocates from to make sure we have
 * enough space for bytes.
 */
int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
{
	struct btrfs_space_info *data_sinfo;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 used;
	int ret = 0, committed = 0, alloc_chunk = 1;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	if (root == root->fs_info->tree_root ||
	    BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
		alloc_chunk = 0;
		committed = 1;
	}

	data_sinfo = BTRFS_I(inode)->space_info;
	if (!data_sinfo)
		goto alloc;

again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
		data_sinfo->bytes_may_use;

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we
		 * need to alloc a new chunk.
		 */
		if (!data_sinfo->full && alloc_chunk) {
			u64 alloc_target;

			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
			spin_unlock(&data_sinfo->lock);
alloc:
			alloc_target = btrfs_get_alloc_profile(root, 1);
			trans = btrfs_join_transaction(root, 1);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     bytes + 2 * 1024 * 1024,
					     alloc_target,
					     CHUNK_ALLOC_NO_FORCE);
			btrfs_end_transaction(trans, root);
			if (ret < 0) {
				if (ret != -ENOSPC)
					return ret;
				else
					goto commit_trans;
			}

			if (!data_sinfo) {
				btrfs_set_inode_space_info(root, inode);
				data_sinfo = BTRFS_I(inode)->space_info;
			}
			goto again;
		}
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
commit_trans:
		if (!committed && !root->fs_info->open_ioctl_trans) {
			committed = 1;
			trans = btrfs_join_transaction(root, 1);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				return ret;
			goto again;
		}

		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	BTRFS_I(inode)->reserved_bytes += bytes;
	spin_unlock(&data_sinfo->lock);

	return 0;
}

/*
 * called when we are clearing a delalloc extent from the
 * inode's io_tree or there was an error for whatever reason
 * after calling btrfs_check_data_free_space
 */
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_space_info *data_sinfo;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	data_sinfo = BTRFS_I(inode)->space_info;
	spin_lock(&data_sinfo->lock);
	data_sinfo->bytes_may_use -= bytes;
	BTRFS_I(inode)->reserved_bytes -= bytes;
	spin_unlock(&data_sinfo->lock);
}

static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
	rcu_read_unlock();
}

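/*
 * Decide whether allocating @alloc_bytes justifies a new chunk for
 * @sinfo: CHUNK_ALLOC_FORCE always allocates, CHUNK_ALLOC_LIMITED keeps
 * roughly 1% of the FS free, and otherwise we only allocate once the
 * existing chunks are reasonably full.
 */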
static int should_alloc_chunk(struct btrfs_root *root,
			      struct btrfs_space_info *sinfo, u64 alloc_bytes,
			      int force)
{
	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * in limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
		thresh = max_t(u64, 64 * 1024 * 1024,
			       div_factor_fine(thresh, 1));

		if (num_bytes - num_allocated < thresh)
			return 1;
	}

	/*
	 * we have two similar checks here, one based on a percentage
	 * and one based on a hard number of 256MB.  The idea
	 * is that if we have a good amount of free
	 * room, don't allocate a chunk.  A good amount is
	 * less than 80% utilized of the chunks we have allocated,
	 * or more than 256MB free
	 */
	if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
		return 0;

	if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
		return 0;

	thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);

	/* 256MB or 5% of the FS */
	thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));

	if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
		return 0;
	return 1;
}

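/*
 * Allocate a new chunk with profile @flags if should_alloc_chunk()
 * agrees.  Only one allocation per space_info runs at a time; anyone
 * else retakes the locks and re-checks once the current allocator is
 * done.
 */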
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int wait_for_alloc = 0;
	int ret = 0;

	flags = btrfs_reduce_alloc_profile(extent_root, flags);

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret);
	}
	BUG_ON(!space_info);

again:
	spin_lock(&space_info->lock);
	if (space_info->force_alloc)
		force = space_info->force_alloc;
	if (space_info->full) {
		spin_unlock(&space_info->lock);
		return 0;
	}

	if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
		spin_unlock(&space_info->lock);
		return 0;
	} else if (space_info->chunk_alloc) {
		wait_for_alloc = 1;
	} else {
		space_info->chunk_alloc = 1;
	}
	spin_unlock(&space_info->lock);

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * The chunk_mutex is held throughout the entirety of a chunk
	 * allocation, so once we've acquired the chunk_mutex we know that the
	 * other guy is done and we need to recheck and see if we should
	 * allocate.
	 */
	if (wait_for_alloc) {
		mutex_unlock(&fs_info->chunk_mutex);
		wait_for_alloc = 0;
		goto again;
	}

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
	spin_lock(&space_info->lock);
	if (ret)
		space_info->full = 1;
	else
		ret = 1;

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
	return ret;
}

/*
 * shrink metadata reservation for delalloc
 */
static int shrink_delalloc(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 to_reclaim, int sync)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_space_info *space_info;
	u64 reserved;
	u64 max_reclaim;
	u64 reclaimed = 0;
	long time_left;
	int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
	int loops = 0;
	unsigned long progress;

	block_rsv = &root->fs_info->delalloc_block_rsv;
	space_info = block_rsv->space_info;

	smp_mb();
	reserved = space_info->bytes_reserved;
	progress = space_info->reservation_progress;

	if (reserved == 0)
		return 0;

	max_reclaim = min(reserved, to_reclaim);

	while (loops < 1024) {
		/* have the flusher threads jump in and do some IO */
		smp_mb();
		nr_pages = min_t(unsigned long, nr_pages,
		       root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
		writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);

		spin_lock(&space_info->lock);
		if (reserved > space_info->bytes_reserved)
			reclaimed += reserved - space_info->bytes_reserved;
		reserved = space_info->bytes_reserved;
		spin_unlock(&space_info->lock);

		loops++;

		if (reserved == 0 || reclaimed >= max_reclaim)
			break;

		if (trans && trans->transaction->blocked)
			return -EAGAIN;

		time_left = schedule_timeout_interruptible(1);

		/* We were interrupted, exit */
		if (time_left)
			break;

		/* we've kicked the IO a few times, if anything has been freed,
		 * exit.  There is no sense in looping here for a long time
		 * when we really need to commit the transaction, or there are
		 * just too many writers without enough free space
		 */
		if (loops > 3) {
			smp_mb();
			if (progress != space_info->reservation_progress)
				break;
		}
	}
	return reclaimed >= to_reclaim;
}

/*
 * Retries tells us how many times we've called reserve_metadata_bytes.  The
 * idea is if this is the first call (retries == 0) then we will add to our
 * reserved count if we can't make the allocation in order to hold our place
 * while we go and try and free up space.  That way for retries > 1 we don't
 * try and add space, we just check to see if the amount of unused space is >=
 * the total space, meaning that our reservation is valid.
 *
 * However if we don't intend to retry this reservation, pass -1 as retries so
 * that it short circuits this logic.
 */
static int reserve_metadata_bytes(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 orig_bytes, int flush)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 unused;
	u64 num_bytes = orig_bytes;
	int retries = 0;
	int ret = 0;
	bool reserved = false;
	bool committed = false;

again:
	ret = -ENOSPC;
	if (reserved)
		num_bytes = 0;

	spin_lock(&space_info->lock);
	unused = space_info->bytes_used + space_info->bytes_reserved +
		 space_info->bytes_pinned + space_info->bytes_readonly +
		 space_info->bytes_may_use;

	/*
	 * The idea here is that if we've not already over-reserved the block
	 * group then we can go ahead and save our reservation first and then
	 * start flushing if we need to.  Otherwise if we've already
	 * overcommitted let's start flushing stuff first and then come back
	 * and try to make our reservation.
	 */
	if (unused <= space_info->total_bytes) {
		unused = space_info->total_bytes - unused;
		if (unused >= num_bytes) {
			if (!reserved)
				space_info->bytes_reserved += orig_bytes;
			ret = 0;
		} else {
			/*
			 * Ok set num_bytes to orig_bytes since we aren't
			 * overcommitted, this way we only try and reclaim what
			 * we need.
			 */
			num_bytes = orig_bytes;
		}
	} else {
		/*
		 * Ok we're over committed, set num_bytes to the overcommitted
		 * amount plus the amount of bytes that we need for this
		 * reservation.
		 */
		num_bytes = unused - space_info->total_bytes +
			(orig_bytes * (retries + 1));
	}

	/*
	 * Couldn't make our reservation, save our place so while we're trying
	 * to reclaim space we can actually use it instead of somebody else
	 * stealing it from us.
	 */
	if (ret && !reserved) {
		space_info->bytes_reserved += orig_bytes;
		reserved = true;
	}

	spin_unlock(&space_info->lock);

	if (!ret)
		return 0;

	if (!flush)
		goto out;

	/*
	 * We do synchronous shrinking since we don't actually unreserve
	 * metadata until after the IO is completed.
	 */
	ret = shrink_delalloc(trans, root, num_bytes, 1);
	if (ret > 0)
		return 0;
	else if (ret < 0)
		goto out;

	/*
	 * So if we were overcommitted it's possible that somebody else flushed
	 * out enough space and we simply didn't have enough space to reclaim,
	 * so go back around and try again.
	 */
	if (retries < 2) {
		retries++;
		goto again;
	}

	spin_lock(&space_info->lock);
	/*
	 * Not enough space to be reclaimed, don't bother committing the
	 * transaction.
	 */
	if (space_info->bytes_pinned < orig_bytes)
		ret = -ENOSPC;
	spin_unlock(&space_info->lock);
	if (ret)
		goto out;

	ret = -EAGAIN;
	if (trans || committed)
		goto out;

	ret = -ENOSPC;
	trans = btrfs_join_transaction(root, 1);
	if (IS_ERR(trans))
		goto out;
	ret = btrfs_commit_transaction(trans, root);
	if (!ret) {
		trans = NULL;
		committed = true;
		goto again;
	}

out:
	if (reserved) {
		spin_lock(&space_info->lock);
		space_info->bytes_reserved -= orig_bytes;
		spin_unlock(&space_info->lock);
	}

	return ret;
}

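/*
 * Pick the reservation to charge for @root: the transaction rsv for
 * COW-able roots, the root's own rsv otherwise, falling back to the
 * fs-wide empty reservation.
 */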
static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv;

	if (root->ref_cows)
		block_rsv = trans->block_rsv;
	else
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &root->fs_info->empty_block_rsv;

	return block_rsv;
}

static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes)
{
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = 0;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, int update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = 1;
	spin_unlock(&block_rsv->lock);
}

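/*
 * Shrink @block_rsv by @num_bytes ((u64)-1 releases everything).  Any
 * excess reserved bytes are handed to @dest first; whatever is left
 * over goes back to the space_info as unreserved space.
 */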
static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
				    struct btrfs_block_rsv *dest, u64 num_bytes)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1)
		num_bytes = block_rsv->size;
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	} else {
		num_bytes = 0;
	}
	spin_unlock(&block_rsv->lock);

	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = 1;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes) {
			spin_lock(&space_info->lock);
			space_info->bytes_reserved -= num_bytes;
			space_info->reservation_progress++;
			spin_unlock(&space_info->lock);
		}
	}
}

static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
				   struct btrfs_block_rsv *dst, u64 num_bytes)
{
	int ret;

	ret = block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	block_rsv_add_bytes(dst, num_bytes, 1);
	return 0;
}

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	atomic_set(&rsv->usage, 1);
	rsv->priority = 6;
	INIT_LIST_HEAD(&rsv->list);
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_block_rsv(block_rsv);
	block_rsv->space_info = __find_space_info(fs_info,
						  BTRFS_BLOCK_GROUP_METADATA);
	return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_root *root,
			  struct btrfs_block_rsv *rsv)
{
	if (rsv && atomic_dec_and_test(&rsv->usage)) {
		btrfs_block_rsv_release(root, rsv, (u64)-1);
		if (!rsv->durable)
			kfree(rsv);
	}
}

/*
 * make the block_rsv struct be able to capture freed space.
 * the captured space will re-add to the block_rsv struct
 * after transaction commit
 */
void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_rsv *block_rsv)
{
	block_rsv->durable = 1;
	mutex_lock(&fs_info->durable_block_rsv_mutex);
	list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list);
	mutex_unlock(&fs_info->durable_block_rsv_mutex);
}

int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv,
			u64 num_bytes)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 1);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 1);
		return 0;
	}

	return ret;
}

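/*
 * Make sure @block_rsv holds at least @min_reserved bytes, or
 * @min_factor tenths of its size.  Refill from the space_info when
 * refill_used is set, and fall back to committing the transaction if
 * enough freed bytes are waiting on the commit.
 */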
int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct btrfs_block_rsv *block_rsv,
			  u64 min_reserved, int min_factor)
{
	u64 num_bytes = 0;
	int commit_trans = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	if (min_factor > 0)
		num_bytes = div_factor(block_rsv->size, min_factor);
	if (min_reserved > num_bytes)
		num_bytes = min_reserved;

	if (block_rsv->reserved >= num_bytes) {
		ret = 0;
	} else {
		num_bytes -= block_rsv->reserved;
		if (block_rsv->durable &&
		    block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes)
			commit_trans = 1;
	}
	spin_unlock(&block_rsv->lock);
	if (!ret)
		return 0;

	if (block_rsv->refill_used) {
		ret = reserve_metadata_bytes(trans, root, block_rsv,
					     num_bytes, 0);
		if (!ret) {
			block_rsv_add_bytes(block_rsv, num_bytes, 0);
			return 0;
		}
	}

	if (commit_trans) {
		if (trans)
			return -EAGAIN;

		trans = btrfs_join_transaction(root, 1);
		BUG_ON(IS_ERR(trans));
		ret = btrfs_commit_transaction(trans, root);
		return 0;
	}

	return -ENOSPC;
}

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv,
			    u64 num_bytes)
{
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_block_rsv_release(struct btrfs_root *root,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;

	if (global_rsv->full || global_rsv == block_rsv ||
	    block_rsv->space_info != global_rsv->space_info)
		global_rsv = NULL;
	block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
}

/*
 * helper to calculate size of global block reservation.
 * the desired value is sum of space used by extent tree,
 * checksum tree and root tree
 */
static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *sinfo;
	u64 num_bytes;
	u64 meta_used;
	u64 data_used;
	int csum_size = btrfs_super_csum_size(&fs_info->super_copy);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
	spin_lock(&sinfo->lock);
	data_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	spin_lock(&sinfo->lock);
	if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
		data_used = 0;
	meta_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
		    csum_size * 2;
	num_bytes += div64_u64(data_used + meta_used, 50);

	if (num_bytes * 3 > meta_used)
		num_bytes = div64_u64(meta_used, 3);

	return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
}

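/*
 * Resize the global block reservation to the computed target and top up
 * its reserved bytes from whatever free space the metadata space_info
 * still has.
 */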
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	u64 num_bytes;

	num_bytes = calc_global_metadata_size(fs_info);

	spin_lock(&block_rsv->lock);
	spin_lock(&sinfo->lock);

	block_rsv->size = num_bytes;

	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
		    sinfo->bytes_reserved + sinfo->bytes_readonly +
		    sinfo->bytes_may_use;

	if (sinfo->total_bytes > num_bytes) {
		num_bytes = sinfo->total_bytes - num_bytes;
		block_rsv->reserved += num_bytes;
		sinfo->bytes_reserved += num_bytes;
	}

	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		sinfo->bytes_reserved -= num_bytes;
		sinfo->reservation_progress++;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	}

	spin_unlock(&sinfo->lock);
	spin_unlock(&block_rsv->lock);
}

static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;
	fs_info->chunk_block_rsv.priority = 10;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->global_block_rsv.priority = 10;
	fs_info->global_block_rsv.refill_used = 1;
	fs_info->delalloc_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.priority = 10;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv);

	btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv);

	update_global_block_rsv(fs_info);
}

static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
}

int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 int num_items)
{
	u64 num_bytes;
	int ret;

	if (num_items == 0 || root->fs_info->chunk_root == root)
		return 0;

	num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
	ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
				  num_bytes);
	if (!ret) {
		trans->bytes_reserved += num_bytes;
		trans->block_rsv = &root->fs_info->trans_block_rsv;
	}
	return ret;
}

void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (!trans->bytes_reserved)
		return;

	BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
	btrfs_block_rsv_release(root, trans->block_rsv,
				trans->bytes_reserved);
	trans->bytes_reserved = 0;
}

int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * one for deleting orphan item, one for updating inode and
	 * two for calling btrfs_truncate_inode_items.
	 *
	 * btrfs_truncate_inode_items is a delete operation, it frees
	 * more space than it uses in most cases.  So two units of
	 * metadata space should be enough for calling it many times.
	 * If all of the metadata space is used, we can commit
	 * transaction and use space it freed.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_orphan_release_metadata(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}

int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;

	/*
	 * two for root back/forward refs, two for directory entries
	 * and one for root of the snapshot.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
	dst_rsv->space_info = src_rsv->space_info;
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

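/*
 * rough estimate: about one eighth of the data bytes is set aside for
 * checksum metadata
 */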
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
{
	return num_bytes >> 3;
}

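/*
 * Reserve metadata space for @num_bytes of delalloc: tree items for any
 * outstanding extents not yet covered by reserved_extents, plus the
 * checksum estimate above.
 */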
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
	u64 to_reserve;
	int nr_extents;
	int reserved_extents;
	int ret;

	if (btrfs_transaction_in_commit(root->fs_info))
		schedule_timeout(1);

	num_bytes = ALIGN(num_bytes, root->sectorsize);

	nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
	reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);

	if (nr_extents > reserved_extents) {
		nr_extents -= reserved_extents;
		to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
	} else {
		nr_extents = 0;
		to_reserve = 0;
	}

	to_reserve += calc_csum_metadata_size(inode, num_bytes);
	ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
	if (ret)
		return ret;

	atomic_add(nr_extents, &BTRFS_I(inode)->reserved_extents);
	atomic_inc(&BTRFS_I(inode)->outstanding_extents);

	block_rsv_add_bytes(block_rsv, to_reserve, 1);

	if (block_rsv->size > 512 * 1024 * 1024)
		shrink_delalloc(NULL, root, to_reserve, 0);

	return 0;
}

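/*
 * Undo a delalloc metadata reservation: drop one outstanding extent,
 * trim reserved_extents down to the new outstanding count and release
 * the corresponding bytes back to the delalloc reservation.
 */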
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 to_free;
	int nr_extents;
	int reserved_extents;

	num_bytes = ALIGN(num_bytes, root->sectorsize);
	atomic_dec(&BTRFS_I(inode)->outstanding_extents);
	WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);

	reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
	do {
		int old, new;

		nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
		if (nr_extents >= reserved_extents) {
			nr_extents = 0;
			break;
		}
		old = reserved_extents;
		nr_extents = reserved_extents - nr_extents;
		new = reserved_extents - nr_extents;
		old = atomic_cmpxchg(&BTRFS_I(inode)->reserved_extents,
				     reserved_extents, new);
		if (likely(old == reserved_extents))
			break;
		reserved_extents = old;
	} while (1);

	to_free = calc_csum_metadata_size(inode, num_bytes);
	if (nr_extents > 0)
		to_free += btrfs_calc_trans_metadata_size(root, nr_extents);

	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
				to_free);
}

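/*
 * Reserve both data and metadata space for @num_bytes of delalloc; on
 * metadata failure the data reservation is rolled back.
 */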
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, num_bytes);
	if (ret)
		return ret;

	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
	if (ret) {
		btrfs_free_reserved_data_space(inode, num_bytes);
		return ret;
	}

	return 0;
}

void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
{
	btrfs_delalloc_release_metadata(inode, num_bytes);
	btrfs_free_reserved_data_space(inode, num_bytes);
}

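/*
 * Adjust the used-byte accounting of every block group overlapping
 * [bytenr, bytenr + num_bytes): bump it on allocation, or move the
 * bytes to pinned (marking the range dirty in pinned_extents) on free.
 * The superblock bytes_used counter is kept in sync as well.
 */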
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;

	/* block accounting for super block */
	spin_lock(&info->delalloc_lock);
	old_val = btrfs_super_bytes_used(&info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(&info->super_copy, old_val);
	spin_unlock(&info->delalloc_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -1;
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space.  This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
			cache_block_group(cache, trans, NULL, 1);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_super_cache_generation(&info->super_copy) != 0 &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		cache->dirty = 1;
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->reservation_progress++;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}
		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}

static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	struct btrfs_block_group_cache *cache;
	u64 bytenr;

	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->key.objectid;
	btrfs_put_block_group(cache);

	return bytenr;
}

static int pin_down_extent(struct btrfs_root *root,
			   struct btrfs_block_group_cache *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
		cache->space_info->reservation_progress++;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	return 0;
}

/*
 * this function must be called within a transaction
 */
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	BUG_ON(!cache);

	pin_down_extent(root, cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
	return 0;
}

/*
 * update size of reserved extents. this function may return -EAGAIN
 * if 'reserve' is true or 'sinfo' is false.
 */
int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				u64 num_bytes, int reserve, int sinfo)
{
	int ret = 0;

	if (sinfo) {
		struct btrfs_space_info *space_info = cache->space_info;

		spin_lock(&space_info->lock);
		spin_lock(&cache->lock);
		if (reserve) {
			if (cache->ro) {
				ret = -EAGAIN;
			} else {
				cache->reserved += num_bytes;
				space_info->bytes_reserved += num_bytes;
			}
		} else {
			if (cache->ro)
				space_info->bytes_readonly += num_bytes;
			cache->reserved -= num_bytes;
			space_info->bytes_reserved -= num_bytes;
			space_info->reservation_progress++;
		}
		spin_unlock(&cache->lock);
		spin_unlock(&space_info->lock);
	} else {
		spin_lock(&cache->lock);
		if (cache->ro) {
			ret = -EAGAIN;
		} else {
			if (reserve)
				cache->reserved += num_bytes;
			else
				cache->reserved -= num_bytes;
		}
		spin_unlock(&cache->lock);
	}
	return ret;
}

int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;

	down_write(&fs_info->extent_commit_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->extent_commit_sem);

	update_global_block_rsv(fs_info);
	return 0;
}

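/*
 * Walk the pinned range [start, end], returning the part below each
 * block group's last_byte_to_unpin to the free space cache and fixing
 * up the pinned/readonly/reserved counters.
 */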
static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	u64 len;

	while (start <= end) {
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache);
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			btrfs_add_free_space(cache, start, len);
		}

		start += len;

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		cache->space_info->bytes_pinned -= len;
		if (cache->ro) {
			cache->space_info->bytes_readonly += len;
		} else if (cache->reserved_pinned > 0) {
			len = min(len, cache->reserved_pinned);
			cache->reserved_pinned -= len;
			cache->space_info->bytes_reserved += len;
		}
		spin_unlock(&cache->lock);
		spin_unlock(&cache->space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}

int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *unpin;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *next_rsv;
	u64 start;
	u64 end;
	int idx;
	int ret;

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		if (btrfs_test_opt(root, DISCARD))
			ret = btrfs_discard_extent(root, start,
						   end + 1 - start, NULL);

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		unpin_extent_range(root, start, end);
		cond_resched();
	}

	mutex_lock(&fs_info->durable_block_rsv_mutex);
	list_for_each_entry_safe(block_rsv, next_rsv,
				 &fs_info->durable_block_rsv_list, list) {

		idx = trans->transid & 0x1;
		if (block_rsv->freed[idx] > 0) {
			block_rsv_add_bytes(block_rsv,
					    block_rsv->freed[idx], 0);
			block_rsv->freed[idx] = 0;
		}
		if (atomic_read(&block_rsv->usage) == 0) {
			btrfs_block_rsv_release(root, block_rsv, (u64)-1);

			if (block_rsv->freed[0] == 0 &&
			    block_rsv->freed[1] == 0) {
				list_del_init(&block_rsv->list);
				kfree(block_rsv);
			}
		} else {
			btrfs_block_rsv_release(root, block_rsv, 0);
		}
	}
	mutex_unlock(&fs_info->durable_block_rsv_mutex);

	return 0;
}

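/*
 * Drop @refs_to_drop references from the extent item at @bytenr.  If
 * this was the last reference, the extent item (and the backref next to
 * it) is deleted, checksums are dropped for data extents and the block
 * group accounting is updated.
 */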
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	ret = lookup_extent_backref(trans, extent_root, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
		if (!found_extent) {
			BUG_ON(iref);
			ret = remove_extent_backref(trans, extent_root, path,
						    NULL, refs_to_drop,
						    is_data);
			BUG_ON(ret);
			btrfs_release_path(path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret) {
				printk(KERN_ERR "umm, got %d back from search"
				       ", was looking for %llu\n", ret,
				       (unsigned long long)bytenr);
				btrfs_print_leaf(extent_root, path->nodes[0]);
			}
			BUG_ON(ret);
			extent_slot = path->slots[0];
		}
	} else {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
		       "parent %llu root %llu owner %llu offset %llu\n",
		       (unsigned long long)bytenr,
		       (unsigned long long)parent,
		       (unsigned long long)root_objectid,
		       (unsigned long long)owner_objectid,
		       (unsigned long long)owner_offset);
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, extent_root, path,
					     owner_objectid, 0);
		BUG_ON(ret < 0);

		btrfs_release_path(path);
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
			printk(KERN_ERR "umm, got %d back from search"
			       ", was looking for %llu\n", ret,
			       (unsigned long long)bytenr);
			btrfs_print_leaf(extent_root, path->nodes[0]);
		}
		BUG_ON(ret);
		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs < refs_to_drop);
	refs -= refs_to_drop;

	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
						    iref, refs_to_drop,
						    is_data);
			BUG_ON(ret);
		}
	} else {
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(root, path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		BUG_ON(ret);
		btrfs_release_path(path);

		if (is_data) {
			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
			BUG_ON(ret);
		} else {
			invalidate_mapping_pages(info->btree_inode->i_mapping,
			     bytenr >> PAGE_CACHE_SHIFT,
			     (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
		}

		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	return ret;
}


/*
 * when we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well.  This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	/* there are still entries for this ref, we can't drop it */
	if (ref->bytenr == bytenr)
		goto out;

	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
		kfree(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock.  If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries.  Go
	 * ahead and process it.
	 */
	head->node.in_tree = 0;
	rb_erase(&head->node.rb_node, &delayed_refs->root);

	delayed_refs->num_entries--;

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (list_empty(&head->cluster))
		delayed_refs->num_heads_ready--;

	list_del_init(&head->cluster);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref(&head->node);
	return ret;
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
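
/*
 * drop one reference on 'buf'.  If this was the last reference, the
 * block's space is either handed straight back to the free space cache
 * and block reservation (when the block was never written) or pinned
 * until the transaction commits.
 */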
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_group_cache *cache = NULL;
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
						 parent, root->root_key.objectid,
						 btrfs_header_level(buf),
						 BTRFS_DROP_DELAYED_REF, NULL);
		BUG_ON(ret);
	}

	if (!last_ref)
		return;

	block_rsv = get_block_rsv(trans, root);
	cache = btrfs_lookup_block_group(root->fs_info, buf->start);
	if (block_rsv->space_info != cache->space_info)
		goto out;

	if (btrfs_header_generation(buf) == trans->transid) {
		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, root, buf->start);
			if (!ret)
				goto pin;
		}

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(root, cache, buf->start, buf->len, 1);
			goto pin;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
		ret = btrfs_update_reserved_bytes(cache, buf->len, 0, 0);
		if (ret == -EAGAIN) {
			/* block group became read-only */
			btrfs_update_reserved_bytes(cache, buf->len, 0, 1);
			goto out;
		}

		ret = 1;
		spin_lock(&block_rsv->lock);
		if (block_rsv->reserved < block_rsv->size) {
			block_rsv->reserved += buf->len;
			ret = 0;
		}
		spin_unlock(&block_rsv->lock);

		if (ret) {
			spin_lock(&cache->space_info->lock);
			cache->space_info->bytes_reserved -= buf->len;
			cache->space_info->reservation_progress++;
			spin_unlock(&cache->space_info->lock);
		}
		goto out;
	}
pin:
	if (block_rsv->durable && !cache->ro) {
		ret = 0;
		spin_lock(&cache->lock);
		if (!cache->ro) {
			cache->reserved_pinned += buf->len;
			ret = 1;
		}
		spin_unlock(&cache->lock);

		if (ret) {
			spin_lock(&block_rsv->lock);
			block_rsv->freed[trans->transid & 0x1] += buf->len;
			spin_unlock(&block_rsv->lock);
		}
	}
out:
	/*
	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
	 * anymore.
	 */
	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
	btrfs_put_block_group(cache);
}

int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent,
		      u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(root, bytenr, num_bytes, 1);
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_DROP_DELAYED_REF, NULL);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner,
					offset, BTRFS_DROP_DELAYED_REF, NULL);
		BUG_ON(ret);
	}
	return ret;
}
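
/* round 'val' up to the next stripesize boundary */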
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}

/*
 * when we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once.  So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes.  Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
 */
static noinline int
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;
	DEFINE_WAIT(wait);

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	put_caching_control(caching_ctl);
	return 0;
}

static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;
	DEFINE_WAIT(wait);

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache));

	put_caching_control(caching_ctl);
	return 0;
}
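
/*
 * index into space_info->block_groups[] for the list that holds block
 * groups with these raid flags
 */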
static int get_block_group_index(struct btrfs_block_group_cache *cache)
{
	int index;
	if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
		index = 0;
	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
		index = 1;
	else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
		index = 2;
	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
		index = 3;
	else
		index = 4;
	return index;
}
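
/*
 * stages find_free_extent() escalates through when an allocation attempt
 * fails; see the summary comment near the bottom of that function
 */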
enum btrfs_loop_type {
	LOOP_FIND_IDEAL = 0,
	LOOP_CACHING_NOWAIT = 1,
	LOOP_CACHING_WAIT = 2,
	LOOP_ALLOC_CHUNK = 3,
	LOOP_NO_EMPTY_SIZE = 4,
};

/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags == BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static noinline int find_free_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 search_start, u64 search_end,
				     u64 hint_byte, struct btrfs_key *ins,
				     int data)
{
	int ret = 0;
	struct btrfs_root *root = orig_root->fs_info->extent_root;
	struct btrfs_free_cluster *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group = NULL;
	int empty_cluster = 2 * 1024 * 1024;
	int allowed_chunk_alloc = 0;
	int done_chunk_alloc = 0;
	struct btrfs_space_info *space_info;
	int last_ptr_loop = 0;
	int loop = 0;
	int index = 0;
	bool found_uncached_bg = false;
	bool failed_cluster_refill = false;
	bool failed_alloc = false;
	bool use_cluster = true;
	u64 ideal_cache_percent = 0;
	u64 ideal_cache_offset = 0;

	WARN_ON(num_bytes < root->sectorsize);
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
	ins->objectid = 0;
	ins->offset = 0;

	space_info = __find_space_info(root->fs_info, data);
	if (!space_info) {
		printk(KERN_ERR "No space info for %d\n", data);
		return -ENOSPC;
	}

	/*
	 * If the space info is for both data and metadata it means we have a
	 * small filesystem and we can't use the clustering stuff.
	 */
	if (btrfs_mixed_space_info(space_info))
		use_cluster = false;

	if (orig_root->ref_cows || empty_size)
		allowed_chunk_alloc = 1;

	if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
		last_ptr = &root->fs_info->meta_alloc_cluster;
		if (!btrfs_test_opt(root, SSD))
			empty_cluster = 64 * 1024;
	}

	if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
	    btrfs_test_opt(root, SSD)) {
		last_ptr = &root->fs_info->data_alloc_cluster;
	}

	if (last_ptr) {
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		spin_unlock(&last_ptr->lock);
	}

	search_start = max(search_start, first_logical_byte(root, 0));
	search_start = max(search_start, hint_byte);

	if (!last_ptr)
		empty_cluster = 0;

	if (search_start == hint_byte) {
ideal_cache:
		block_group = btrfs_lookup_block_group(root->fs_info,
						       search_start);
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if it's not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, data) &&
		    (block_group->cached != BTRFS_CACHE_NO ||
		     search_start == ideal_cache_offset)) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				index = get_block_group_index(block_group);
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}

search:
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group, &space_info->block_groups[index],
			    list) {
		u64 offset;
		int cached;

		btrfs_get_block_group(block_group);
		search_start = block_group->key.objectid;

		/*
		 * this can happen if we end up cycling through all the
		 * raid types, but we want to make sure we only allocate
		 * for the proper type.
		 */
		if (!block_group_bits(block_group, data)) {
			u64 extra = BTRFS_BLOCK_GROUP_DUP |
				BTRFS_BLOCK_GROUP_RAID1 |
				BTRFS_BLOCK_GROUP_RAID10;

			/*
			 * if they asked for extra copies and this block group
			 * doesn't provide them, bail.  This does allow us to
			 * fill raid0 from raid1.
			 */
			if ((data & extra) && !(block_group->flags & extra))
				goto loop;
		}

have_block_group:
		if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
			u64 free_percent;

			ret = cache_block_group(block_group, trans,
						orig_root, 1);
			if (block_group->cached == BTRFS_CACHE_FINISHED)
				goto have_block_group;

			free_percent = btrfs_block_group_used(&block_group->item);
			free_percent *= 100;
			free_percent = div64_u64(free_percent,
						 block_group->key.offset);
			free_percent = 100 - free_percent;
			if (free_percent > ideal_cache_percent &&
			    likely(!block_group->ro)) {
				ideal_cache_offset = block_group->key.objectid;
				ideal_cache_percent = free_percent;
			}

			/*
			 * We only want to start kthread caching if we are at
			 * the point where we will wait for caching to make
			 * progress, or if our ideal search is over and we've
			 * found somebody to start caching.
			 */
			if (loop > LOOP_CACHING_NOWAIT ||
			    (loop > LOOP_FIND_IDEAL &&
			     atomic_read(&space_info->caching_threads) < 2)) {
				ret = cache_block_group(block_group, trans,
							orig_root, 0);
				BUG_ON(ret);
			}
			found_uncached_bg = true;

			/*
			 * If loop is set for cached only, try the next block
			 * group.
			 */
			if (loop == LOOP_FIND_IDEAL)
				goto loop;
		}

		cached = block_group_cache_done(block_group);
		if (unlikely(!cached))
			found_uncached_bg = true;

		if (unlikely(block_group->ro))
			goto loop;

		/*
		 * Ok we want to try and use the cluster allocator, so lets look
		 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
		 * have tried the cluster allocator plenty of times at this
		 * point and not have found anything, so we are likely way too
		 * fragmented for the clustering stuff to find anything, so lets
		 * just skip it and let the allocator find whatever block it can
		 * find
		 */
		if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
			/*
			 * the refill lock keeps out other
			 * people trying to start a new cluster
			 */
			spin_lock(&last_ptr->refill_lock);
			if (last_ptr->block_group &&
			    (last_ptr->block_group->ro ||
			    !block_group_bits(last_ptr->block_group, data))) {
				offset = 0;
				goto refill_cluster;
			}

			offset = btrfs_alloc_from_cluster(block_group, last_ptr,
						 num_bytes, search_start);
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
				goto checks;
			}

			spin_lock(&last_ptr->lock);
			/*
			 * whoops, this cluster doesn't actually point to
			 * this block group.  Get a ref on the block
			 * group it does point to and try again
			 */
			if (!last_ptr_loop && last_ptr->block_group &&
			    last_ptr->block_group != block_group) {
				btrfs_put_block_group(block_group);
				block_group = last_ptr->block_group;
				btrfs_get_block_group(block_group);
				spin_unlock(&last_ptr->lock);
				spin_unlock(&last_ptr->refill_lock);

				last_ptr_loop = 1;
				search_start = block_group->key.objectid;
				/*
				 * we know this block group is properly
				 * in the list because
				 * btrfs_remove_block_group, drops the
				 * cluster before it removes the block
				 * group from the list
				 */
				goto have_block_group;
			}
			spin_unlock(&last_ptr->lock);
refill_cluster:
			/*
			 * this cluster didn't work out, free it and
			 * start over
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);

			last_ptr_loop = 0;

			/* allocate a cluster in this block group */
			ret = btrfs_find_space_cluster(trans, root,
					       block_group, last_ptr,
					       offset, num_bytes,
					       empty_cluster + empty_size);
			if (ret == 0) {
				/*
				 * now pull our allocation out of this
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
						  last_ptr, num_bytes,
						  search_start);
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
					goto checks;
				}
			} else if (!cached && loop > LOOP_CACHING_NOWAIT
				   && !failed_cluster_refill) {
				spin_unlock(&last_ptr->refill_lock);

				failed_cluster_refill = true;
				wait_block_group_cache_progress(block_group,
				       num_bytes + empty_cluster + empty_size);
				goto have_block_group;
			}

			/*
			 * at this point we either didn't find a cluster
			 * or we weren't able to allocate a block from our
			 * cluster.  Free the cluster we've been trying
			 * to use, and go to the next block group
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);
			spin_unlock(&last_ptr->refill_lock);
			goto loop;
		}

		offset = btrfs_find_space_for_alloc(block_group, search_start,
						    num_bytes, empty_size);
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle of
		 * caching and we are ok with waiting, then go ahead and wait
		 * for progress to be made, and set failed_alloc to true.
		 *
		 * If failed_alloc is true then we've already waited on this
		 * block group once and should move on to the next block group.
		 */
		if (!offset && !failed_alloc && !cached &&
		    loop > LOOP_CACHING_NOWAIT) {
			wait_block_group_cache_progress(block_group,
						num_bytes + empty_size);
			failed_alloc = true;
			goto have_block_group;
		} else if (!offset) {
			goto loop;
		}
checks:
		search_start = stripe_align(root, offset);
		/* move on to the next group */
		if (search_start + num_bytes >= search_end) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		/* move on to the next group */
		if (search_start + num_bytes >
		    block_group->key.objectid + block_group->key.offset) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		ins->objectid = search_start;
		ins->offset = num_bytes;

		if (offset < search_start)
			btrfs_add_free_space(block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);

		ret = btrfs_update_reserved_bytes(block_group, num_bytes, 1,
						  (data & BTRFS_BLOCK_GROUP_DATA));
		if (ret == -EAGAIN) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		/* we are all good, lets return */
		ins->objectid = search_start;
		ins->offset = num_bytes;

		if (offset < search_start)
			btrfs_add_free_space(block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);
		break;
loop:
		failed_cluster_refill = false;
		failed_alloc = false;
		BUG_ON(index != get_block_group_index(block_group));
		btrfs_put_block_group(block_group);
	}
	up_read(&space_info->groups_sem);

	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
		goto search;

	/* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
	 *			for them to make caching progress.  Also
	 *			determine the best possible bg to cache
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *			again
	 */
	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
	    (found_uncached_bg || empty_size || empty_cluster ||
	     allowed_chunk_alloc)) {
		index = 0;
		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
			found_uncached_bg = false;
			loop++;
			if (!ideal_cache_percent &&
			    atomic_read(&space_info->caching_threads))
				goto search;

			/*
			 * 1 of the following 2 things have happened so far
			 *
			 * 1) We found an ideal block group for caching that
			 * is mostly full and will cache quickly, so we might
			 * as well wait for it.
			 *
			 * 2) We searched for cached only and we didn't find
			 * anything, and we didn't start any caching kthreads
			 * either, so chances are we will loop through and
			 * start a couple caching kthreads, and then come back
			 * around and just wait for them.  This will be slower
			 * because we will have 2 caching kthreads reading at
			 * the same time when we could have just started one
			 * and waited for it to get far enough to give us an
			 * allocation, so go ahead and go to the wait caching
			 * loop.
			 */
			loop = LOOP_CACHING_WAIT;
			search_start = ideal_cache_offset;
			ideal_cache_percent = 0;
			goto ideal_cache;
		} else if (loop == LOOP_FIND_IDEAL) {
			/*
			 * Didn't find an uncached bg, wait on anything we find
			 * next.
			 */
			loop = LOOP_CACHING_WAIT;
			goto search;
		}

		if (loop < LOOP_CACHING_WAIT) {
			loop++;
			goto search;
		}

		if (loop == LOOP_ALLOC_CHUNK) {
			empty_size = 0;
			empty_cluster = 0;
		}

		if (allowed_chunk_alloc) {
			ret = do_chunk_alloc(trans, root, num_bytes +
					     2 * 1024 * 1024, data,
					     CHUNK_ALLOC_LIMITED);
			allowed_chunk_alloc = 0;
			done_chunk_alloc = 1;
		} else if (!done_chunk_alloc &&
			   space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
			space_info->force_alloc = CHUNK_ALLOC_LIMITED;
		}

		if (loop < LOOP_NO_EMPTY_SIZE) {
			loop++;
			goto search;
		}
		ret = -ENOSPC;
	} else if (!ins->objectid) {
		ret = -ENOSPC;
	}

	/* we found what we needed */
	if (ins->objectid) {
		if (!(data & BTRFS_BLOCK_GROUP_DATA))
			trans->block_group = block_group->key.objectid;

		btrfs_put_block_group(block_group);
		ret = 0;
	}

	return ret;
}
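
/*
 * dump a space_info's counters to the kernel log, optionally including
 * a summary line and the free space dump for each of its block groups
 */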
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups)
{
	struct btrfs_block_group_cache *cache;
	int index = 0;

	spin_lock(&info->lock);
	printk(KERN_INFO "space_info has %llu free, is %sfull\n",
	       (unsigned long long)(info->total_bytes - info->bytes_used -
				    info->bytes_pinned - info->bytes_reserved -
				    info->bytes_readonly),
	       (info->full) ? "" : "not ");
	printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
	       (unsigned long long)info->total_bytes,
	       (unsigned long long)info->bytes_used,
	       (unsigned long long)info->bytes_pinned,
	       (unsigned long long)info->bytes_reserved,
	       (unsigned long long)info->bytes_may_use,
	       (unsigned long long)info->bytes_readonly);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
		       "%llu pinned %llu reserved\n",
		       (unsigned long long)cache->key.objectid,
		       (unsigned long long)cache->key.offset,
		       (unsigned long long)btrfs_block_group_used(&cache->item),
		       (unsigned long long)cache->pinned,
		       (unsigned long long)cache->reserved);
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}
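
/*
 * reserve an extent of at least min_alloc_size bytes.  On -ENOSPC the
 * request is halved (kept sector aligned and >= min_alloc_size) and a
 * chunk allocation is forced before retrying.
 */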
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 u64 search_end, struct btrfs_key *ins,
			 u64 data)
{
	int ret;
	u64 search_start = 0;

	data = btrfs_get_alloc_profile(root, data);
again:
	/*
	 * the only place that sets empty_size is btrfs_realloc_node, which
	 * is not called recursively on allocations
	 */
	if (empty_size || root->ref_cows)
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     num_bytes + 2 * 1024 * 1024, data,
				     CHUNK_ALLOC_NO_FORCE);

	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(trans, root, num_bytes, empty_size,
			       search_start, search_end, hint_byte,
			       ins, data);

	if (ret == -ENOSPC && num_bytes > min_alloc_size) {
		num_bytes = num_bytes >> 1;
		num_bytes = num_bytes & ~(root->sectorsize - 1);
		num_bytes = max(num_bytes, min_alloc_size);
		do_chunk_alloc(trans, root->fs_info->extent_root,
			       num_bytes, data, CHUNK_ALLOC_FORCE);
		goto again;
	}
	if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
		struct btrfs_space_info *sinfo;

		sinfo = __find_space_info(root->fs_info, data);
		printk(KERN_ERR "btrfs allocation failed flags %llu, "
		       "wanted %llu\n", (unsigned long long)data,
		       (unsigned long long)num_bytes);
		dump_space_info(sinfo, num_bytes, 1);
	}

	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);

	return ret;
}
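
/*
 * return a reserved but unused extent to the free space cache,
 * discarding it first if the DISCARD mount option is set
 */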
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
		printk(KERN_ERR "Unable to find block group for %llu\n",
		       (unsigned long long)start);
		return -ENOSPC;
	}

	if (btrfs_test_opt(root, DISCARD))
		ret = btrfs_discard_extent(root, start, len, NULL);

	btrfs_add_free_space(cache, start, len);
	btrfs_update_reserved_bytes(cache, len, 0, 1);
	btrfs_put_block_group(cache);

	trace_btrfs_reserved_extent_free(root, start, len);

	return ret;
}
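
/*
 * insert the extent item for a newly allocated data extent into the
 * extent tree, with the data backref stored inline right after it,
 * then update the block group accounting.
 */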
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;

	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
	if (ret) {
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
	return ret;
}
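
/*
 * same as alloc_reserved_file_extent, but for a tree block: the extent
 * item is followed by a btrfs_tree_block_info and a single inline
 * backref (shared or keyed by the root objectid).
 */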
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
	block_info = (struct btrfs_tree_block_info *)(extent_item + 1);

	btrfs_set_tree_block_key(leaf, block_info, key);
	btrfs_set_tree_block_level(leaf, block_info, level);

	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	if (parent > 0) {
		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
	if (ret) {
		printk(KERN_ERR "btrfs update block group failed for %llu "
		       "%llu\n", (unsigned long long)ins->objectid,
		       (unsigned long long)ins->offset);
		BUG();
	}
	return ret;
}
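
/*
 * record a newly allocated file extent by queueing a delayed data ref
 * for it; the extent item itself is inserted when the delayed refs are
 * run.
 */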
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 root_objectid, u64 owner,
				     u64 offset, struct btrfs_key *ins)
{
	int ret;

	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);

	ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
					 0, root_objectid, owner, offset,
					 BTRFS_ADD_DELAYED_EXTENT, NULL);
	return ret;
}

/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;
	u64 start = ins->objectid;
	u64 num_bytes = ins->offset;

	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
	cache_block_group(block_group, trans, NULL, 0);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
		BUG_ON(ret);
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = add_excluded_extent(root, start, num_bytes);
			BUG_ON(ret);
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			BUG_ON(ret);
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			BUG_ON(ret);

			start = caching_ctl->progress;
			num_bytes = ins->objectid + ins->offset -
				    caching_ctl->progress;
			ret = add_excluded_extent(root, start, num_bytes);
			BUG_ON(ret);
		}

		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}

	ret = btrfs_update_reserved_bytes(block_group, ins->offset, 1, 1);
	BUG_ON(ret);
	btrfs_put_block_group(block_group);
	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
					 0, owner, offset, ins, 1);
	return ret;
}
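
/*
 * initialize the extent buffer for a freshly allocated tree block:
 * lock it, clear any stale content, and mark it dirty in the extent_io
 * tree that matches the transaction (or log transaction) it belongs to.
 */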
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u64 bytenr, u32 blocksize,
					    int level)
{
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root, buf);

	btrfs_set_lock_blocking(buf);
	btrfs_set_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		/*
		 * we allow two log transactions at a time, use different
		 * EXTENT bit to differentiate dirty pages.
		 */
		if (root->log_transid % 2 == 0)
			set_extent_dirty(&root->dirty_log_pages, buf->start,
					 buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
				       buf->start + buf->len - 1, GFP_NOFS);
	} else {
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->blocks_used++;
	/* this returns a buffer locked for blocking */
	return buf;
}
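
/*
 * pick the block reservation to charge a new tree block to and take
 * 'blocksize' bytes from it, falling back to a fresh metadata
 * reservation or the global reserve when the preferred rsv is empty.
 */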
static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
	      struct btrfs_root *root, u32 blocksize)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	int ret;

	block_rsv = get_block_rsv(trans, root);

	if (block_rsv->size == 0) {
		ret = reserve_metadata_bytes(trans, root, block_rsv,
					     blocksize, 0);
		/*
		 * If we couldn't reserve metadata bytes try and use some from
		 * the global reserve.
		 */
		if (ret && block_rsv != global_rsv) {
			ret = block_rsv_use_bytes(global_rsv, blocksize);
			if (!ret)
				return global_rsv;
			return ERR_PTR(ret);
		} else if (ret) {
			return ERR_PTR(ret);
		}
		return block_rsv;
	}

	ret = block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;
	if (ret) {
		WARN_ON(1);
		ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
					     0);
		if (!ret) {
			spin_lock(&block_rsv->lock);
			block_rsv->size += blocksize;
			spin_unlock(&block_rsv->lock);
			return block_rsv;
		} else if (ret && block_rsv != global_rsv) {
			ret = block_rsv_use_bytes(global_rsv, blocksize);
			if (!ret)
				return global_rsv;
		}
	}

	return ERR_PTR(-ENOSPC);
}
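
/*
 * undo use_block_rsv() when the new block was not allocated after all
 */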
static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
	block_rsv_add_bytes(block_rsv, blocksize, 0);
	block_rsv_release_bytes(block_rsv, NULL, 0);
}

/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns the tree buffer or an ERR_PTR on error.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u32 blocksize,
					u64 parent, u64 root_objectid,
					struct btrfs_disk_key *key, int level,
					u64 hint, u64 empty_size)
{
	struct btrfs_key ins;
	struct btrfs_block_rsv *block_rsv;
	struct extent_buffer *buf;
	u64 flags = 0;
	int ret;

	block_rsv = use_block_rsv(trans, root, blocksize);
	if (IS_ERR(block_rsv))
		return ERR_CAST(block_rsv);

	ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
				   empty_size, hint, (u64)-1, &ins, 0);
	if (ret) {
		unuse_block_rsv(block_rsv, blocksize);
		return ERR_PTR(ret);
	}

	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
				    blocksize, level);
	BUG_ON(IS_ERR(buf));

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins.objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		struct btrfs_delayed_extent_op *extent_op;
		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
		BUG_ON(!extent_op);
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		extent_op->update_key = 1;
		extent_op->update_flags = 1;
		extent_op->is_data = 0;

		ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
					ins.offset, parent, root_objectid,
					level, BTRFS_ADD_DELAYED_EXTENT,
					extent_op);
		BUG_ON(ret);
	}
	return buf;
}
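
/*
 * state shared by walk_down_tree/walk_up_tree while dropping a subtree:
 * per-level reference counts and flags, the current stage of the walk,
 * and readahead bookkeeping.
 */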
struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
	int reada_slot;
	int reada_count;
};

#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2

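/*
 * read ahead the lower level blocks the walk is about to visit, skipping
 * pointers it can already tell won't be descended into.  reada_count
 * shrinks when the previous batch wasn't fully consumed and grows
 * otherwise.
 */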
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u32 nritems;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(root));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);
	blocksize = btrfs_level_size(root, wc->level - 1);

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
					       &refs, &flags);
		BUG_ON(ret);
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		ret = readahead_tree_block(root, bytenr, blocksize,
					   generation);
		if (ret)
			break;
		nread++;
	}
	wc->reada_slot = slot;
}

/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when reference count of tree block is 1, it won't increase
	 * again.  once full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, root,
					       eb->start, eb->len,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(ret);
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock(eb);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_inc_ref(trans, root, eb, 1);
		BUG_ON(ret);
		ret = btrfs_dec_ref(trans, root, eb, 0);
		BUG_ON(ret);
		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
						  eb->len, flag, 0);
		BUG_ON(ret);
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock(eb);
		path->locks[level] = 0;
	}
	return 0;
}

/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to.  if the block
 * is shared and we need update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF.  if the block is shared and there is no
 * need to update back refs, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	u64 bytenr;
	u64 generation;
	u64 parent;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;

	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * if the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}

	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
	blocksize = btrfs_level_size(root, level - 1);

	next = btrfs_find_tree_block(root, bytenr, blocksize);
	if (!next) {
		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
		if (!next)
			return -ENOMEM;
		reada = 1;
	}
	btrfs_tree_lock(next);
	btrfs_set_lock_blocking(next);

	ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1]);
	BUG_ON(ret);
	BUG_ON(wc->refs[level - 1] == 0);
	*lookup_info = 0;

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;

			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;

			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;

			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}

	if (!btrfs_buffer_uptodate(next, generation)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}

	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(root, bytenr, blocksize, generation);
		if (!next)
			return -EIO;
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);
	}

	level--;
	BUG_ON(level != btrfs_header_level(next));
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = 1;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level]));
			parent = 0;
		}

		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
					root->root_key.objectid, level - 1, 0);
		BUG_ON(ret);
	}
	btrfs_tree_unlock(next);
	free_extent_buffer(next);
	*lookup_info = 1;
	return 1;
}

/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = 1;

			ret = btrfs_lookup_extent_info(trans, root,
						       eb->start, eb->len,
						       &wc->refs[level],
						       &wc->flags[level]);
			BUG_ON(ret);
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock(eb);
				path->locks[level] = 0;
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0);
			BUG_ON(ret);
		}
		/* make block locked assertion in clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = 1;
		}
		clean_tree_block(trans, root, eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(eb));
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level + 1]));
	}

	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;
}
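
/*
 * walk down from wc->level, processing each block with walk_down_proc
 * and descending with do_walk_down until we reach a leaf or a block
 * that should not be entered.
 */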
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret > 0)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			return ret;
		level = wc->level;
	}
	return 0;
}
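
/*
 * walk back up the tree, handing each fully processed block to
 * walk_up_proc.  returns 1 once everything up to max_level has been
 * processed, 0 when the walk should go back down.
 */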
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;

			if (path->locks[level]) {
				btrfs_tree_unlock(path->nodes[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}

/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are only
 * referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one.  if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 */
int btrfs_drop_snapshot(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, int update_ref)
{
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	BUG_ON(!wc);

	trans = btrfs_start_transaction(tree_root, 0);
	BUG_ON(IS_ERR(trans));

	if (block_rsv)
		trans->block_rsv = block_rsv;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(path->nodes[level]);
		path->slots[level] = 0;
		path->locks[level] = 1;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out;
		}
		WARN_ON(ret > 0);

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			btrfs_set_lock_blocking(path->nodes[level]);

			ret = btrfs_lookup_extent_info(trans, root,
						       path->nodes[level]->start,
						       path->nodes[level]->len,
						       &wc->refs[level],
						       &wc->flags[level]);
			BUG_ON(ret);
			BUG_ON(wc->refs[level] == 0);

			if (level == root_item->drop_level)
				break;

			btrfs_tree_unlock(path->nodes[level]);
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			level = wc->level;
			btrfs_node_key(path->nodes[level],
				       &root_item->drop_progress,
				       path->slots[level]);
			root_item->drop_level = level;
		}

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans, tree_root)) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			BUG_ON(ret);

			btrfs_end_transaction_throttle(trans, tree_root);
			trans = btrfs_start_transaction(tree_root, 0);
			BUG_ON(IS_ERR(trans));
			if (block_rsv)
				trans->block_rsv = block_rsv;
		}
	}
	btrfs_release_path(path);
	BUG_ON(err);

	ret = btrfs_del_root(trans, tree_root, &root->root_key);
	BUG_ON(ret);

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
					   NULL, NULL);
		BUG_ON(ret < 0);
		if (ret > 0) {
			/* if we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	if (root->in_radix) {
		btrfs_free_fs_root(tree_root->fs_info, root);
	} else {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		kfree(root);
	}
out:
	btrfs_end_transaction_throttle(trans, tree_root);
	kfree(wc);
	btrfs_free_path(path);
	return err;
}

/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct extent_buffer *node,
			struct extent_buffer *parent)
{
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_locked(parent);
	parent_level = btrfs_header_level(parent);
	extent_buffer_get(parent);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = 1;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}
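
/*
 * [editor's note, not in the original] helper to pick new block group
 * flags based on the number of usable devices: on a single device fs,
 * raid0 degrades to single device chunks and raid1/raid10 degrade to
 * dup, while with multiple devices dup is upgraded to raid1 and single
 * device chunks to raid0.
 */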
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	/*
	 * we add in the count of missing devices because we want
	 * to make sure that any RAID levels on a degraded FS
	 * continue to be honored.
	 */
	num_devices = root->fs_info->fs_devices->rw_devices +
		root->fs_info->fs_devices->missing_devices;

	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
		return flags;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* turn single device chunks into raid0 */
		return stripped | BTRFS_BLOCK_GROUP_RAID0;
	}
	return flags;
}
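
/*
 * [editor's note, not in the original] mark a block group read only if
 * the space_info has enough room left to absorb this group's unused
 * bytes as read only space.  returns -ENOSPC if setting the group read
 * only would over-commit the space_info.
 */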
static int set_block_group_ro(struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	int ret = -ENOSPC;

	if (cache->ro)
		return 0;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);

	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
	    sinfo->bytes_may_use + sinfo->bytes_readonly +
	    cache->reserved_pinned + num_bytes <= sinfo->total_bytes) {
		sinfo->bytes_readonly += num_bytes;
		sinfo->bytes_reserved += cache->reserved_pinned;
		cache->reserved_pinned = 0;
		cache->ro = 1;
		ret = 0;
	}

	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return ret;
}
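
/*
 * [editor's note, not in the original] try to set a block group read
 * only, pre-allocating a new chunk first so the allocator still has
 * somewhere to go once this group is excluded.  if the first attempt
 * fails, force another chunk allocation and retry once.
 */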
int btrfs_set_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

	BUG_ON(cache->ro);

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(IS_ERR(trans));

	alloc_flags = update_block_group_flags(root, cache->flags);
	if (alloc_flags != cache->flags)
		do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
			       CHUNK_ALLOC_FORCE);

	ret = set_block_group_ro(cache);
	if (!ret)
		goto out;
	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
	ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
			     CHUNK_ALLOC_FORCE);
	if (ret < 0)
		goto out;
	ret = set_block_group_ro(cache);
out:
	btrfs_end_transaction(trans, root);
	return ret;
}
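
/*
 * [editor's note, not in the original] force the allocation of a new
 * chunk with the allocation profile derived from the given type.
 */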
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 type)
{
	u64 alloc_flags = get_alloc_profile(root, type);
	return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
			      CHUNK_ALLOC_FORCE);
}

/*
 * helper to account the unused space of all the readonly block groups
 * in the list.  takes mirrors into account.
 */
static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
{
	struct btrfs_block_group_cache *block_group;
	u64 free_bytes = 0;
	int factor;

	list_for_each_entry(block_group, groups_list, list) {
		spin_lock(&block_group->lock);

		if (!block_group->ro) {
			spin_unlock(&block_group->lock);
			continue;
		}

		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
					  BTRFS_BLOCK_GROUP_RAID10 |
					  BTRFS_BLOCK_GROUP_DUP))
			factor = 2;
		else
			factor = 1;

		free_bytes += (block_group->key.offset -
			       btrfs_block_group_used(&block_group->item)) *
			       factor;

		spin_unlock(&block_group->lock);
	}

	return free_bytes;
}

/*
 * helper to account the unused space of all the readonly block groups
 * in the space_info.  takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	int i;
	u64 free_bytes = 0;

	spin_lock(&sinfo->lock);

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		if (!list_empty(&sinfo->block_groups[i]))
			free_bytes += __btrfs_get_ro_block_group_free_space(
						&sinfo->block_groups[i]);

	spin_unlock(&sinfo->lock);

	return free_bytes;
}
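
/*
 * [editor's note, not in the original] undo set_block_group_ro: give
 * the unused bytes of the group back to the space_info and clear the
 * ro flag.
 */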
int btrfs_set_block_group_rw(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);
	sinfo->bytes_readonly -= num_bytes;
	cache->ro = 0;
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return 0;
}

/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
 * ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	int full = 0;
	int ret = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);

	/* odd, couldn't find the block group, leave it alone */
	if (!block_group)
		return -1;

	/* no bytes used, we're good */
	if (!btrfs_block_group_used(&block_group->item))
		goto out;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);

	full = space_info->full;

	/*
	 * if this is the last block group we have in this space, we can't
	 * relocate it unless we're able to allocate a new chunk below.
	 *
	 * Otherwise, we need to make sure we have room in the space to handle
	 * all of the extents from this block group.  If we can, we're good
	 */
	if ((space_info->total_bytes != block_group->key.offset) &&
	    (space_info->bytes_used + space_info->bytes_reserved +
	     space_info->bytes_pinned + space_info->bytes_readonly +
	     btrfs_block_group_used(&block_group->item) <
	     space_info->total_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * ok we don't have enough space, but maybe we have free space on our
	 * devices to allocate new chunks for relocation, so loop through our
	 * alloc devices and guess if we have enough space.  However, if we
	 * were marked as full, then we know there aren't enough chunks, and we
	 * can just return.
	 */
	ret = -1;
	if (full)
		goto out;

	mutex_lock(&root->fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 min_free = btrfs_block_group_used(&block_group->item);
		u64 dev_offset;

		/*
		 * check to make sure we can actually find a chunk with enough
		 * space to fit our block group in.
		 */
		if (device->total_bytes > device->bytes_used + min_free) {
			ret = find_free_dev_extent(NULL, device, min_free,
						   &dev_offset, NULL);
			if (!ret)
				break;
			ret = -1;
		}
	}
	mutex_unlock(&root->fs_info->chunk_mutex);
out:
	btrfs_put_block_group(block_group);
	return ret;
}
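
/*
 * [editor's note, not in the original] find the first block group item
 * at or after the given key, used when scanning the extent tree for
 * block group items.
 */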
static int find_first_block_group(struct btrfs_root *root,
		struct btrfs_path *path, struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}
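
/*
 * [editor's note, not in the original] walk all the block groups and
 * drop the inode reference each one may hold on its free space cache
 * file; presumably used when tearing the filesystem down.
 */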
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = next_block_group(info->tree_root,
						       block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		iput(inode);
		last = block_group->key.objectid + block_group->key.offset;
		btrfs_put_block_group(block_group);
	}
}
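
/*
 * [editor's note, not in the original] tear down all block group and
 * space_info structures, waiting for any in-progress caching first;
 * per the comment inside, this only runs during the final stages of
 * unmount.
 */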
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->extent_commit_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		put_caching_control(caching_ctl);
	}
	up_write(&info->extent_commit_sem);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		if (block_group->cached == BTRFS_CACHE_STARTED)
			wait_block_group_cache_done(block_group);

		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO)
			free_excluded_extents(info->extent_root, block_group);

		btrfs_remove_free_space_cache(block_group);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/* now that all the block groups are freed, go through and
	 * free all the space_info structs.  This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them.  We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */
	synchronize_rcu();

	release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);
		if (space_info->bytes_pinned > 0 ||
		    space_info->bytes_reserved > 0) {
			WARN_ON(1);
			dump_space_info(space_info, 0, 0);
		}
		list_del(&space_info->list);
		kfree(space_info);
	}
	return 0;
}
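
/*
 * [editor's note, not in the original] add a block group to the
 * per-raid-profile list in its space_info.
 */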
static void __link_block_group(struct btrfs_space_info *space_info,
			       struct btrfs_block_group_cache *cache)
{
	int index = get_block_group_index(cache);

	down_write(&space_info->groups_sem);
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);
}
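
/*
 * [editor's note, not in the original] read all the block group items
 * out of the extent tree and build the in-memory block group cache,
 * excluding super stripes and marking full or empty groups as already
 * cached along the way.
 */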
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int need_clear = 0;
	u64 cache_gen;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
	if (cache_gen != 0 &&
	    btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(root, CLEAR_CACHE))
		need_clear = 1;
	if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen)
		printk(KERN_INFO "btrfs: disk space caching is enabled\n");

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}
		cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
						GFP_NOFS);
		if (!cache->free_space_ctl) {
			kfree(cache);
			ret = -ENOMEM;
			goto error;
		}

		atomic_set(&cache->count, 1);
		spin_lock_init(&cache->lock);
		cache->fs_info = info;
		INIT_LIST_HEAD(&cache->list);
		INIT_LIST_HEAD(&cache->cluster_list);

		if (need_clear)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(path);
		cache->flags = btrfs_block_group_flags(&cache->item);
		cache->sectorsize = root->sectorsize;

		btrfs_init_free_space_ctl(cache);

		/*
		 * We need to exclude the super stripes now so that the space
		 * info has super bytes accounted for, otherwise we'll think
		 * we have more space than we actually do.
		 */
		exclude_super_stripes(root, cache);

		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us a lot of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			free_excluded_extents(root, cache);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
			free_excluded_extents(root, cache);
		}

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		BUG_ON(ret);
		cache->space_info = space_info;
		spin_lock(&cache->space_info->lock);
		cache->space_info->bytes_readonly += cache->bytes_super;
		spin_unlock(&cache->space_info->lock);

		__link_block_group(space_info, cache);

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		BUG_ON(ret);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_ro(cache);
	}

	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
		if (!(get_alloc_profile(root, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * avoid allocating from un-mirrored block group if there are
		 * mirrored block groups.
		 */
		list_for_each_entry(cache, &space_info->block_groups[3], list)
			set_block_group_ro(cache);
		list_for_each_entry(cache, &space_info->block_groups[4], list)
			set_block_group_ro(cache);
	}

	init_global_block_rsv(info);
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
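
/*
 * [editor's note, not in the original] create a new block group for a
 * freshly allocated chunk and insert its block group item into the
 * extent tree.
 */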
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_log_full_commit = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return -ENOMEM;
	}

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->sectorsize = root->sectorsize;
	cache->fs_info = root->fs_info;

	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);

	btrfs_init_free_space_ctl(cache);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	exclude_super_stripes(root, cache);

	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	free_excluded_extents(root, cache);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret);

	spin_lock(&cache->space_info->lock);
	cache->space_info->bytes_readonly += cache->bytes_super;
	spin_unlock(&cache->space_info->lock);

	__link_block_group(cache->space_info, cache);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	BUG_ON(ret);

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	BUG_ON(ret);

	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}
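
/*
 * [editor's note, not in the original] remove an empty, read only
 * block group: drop its free space cache inode, delete its free space
 * item from the tree root and its block group item from the extent
 * tree, and unaccount its space from the space_info.
 */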
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	int ret;
	int factor;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	free_excluded_extents(root, block_group);

	memcpy(&key, &block_group->key, sizeof(key));
	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
				  BTRFS_BLOCK_GROUP_RAID1 |
				  BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	inode = lookup_free_space_inode(root, block_group, path);
	if (!IS_ERR(inode)) {
		btrfs_orphan_add(trans, inode);
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	up_write(&block_group->space_info->groups_sem);

	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	block_group->space_info->disk_total -= block_group->key.offset * factor;
	spin_unlock(&block_group->space_info->lock);

	memcpy(&key, &block_group->key, sizeof(key));

	btrfs_clear_space_info_full(root->fs_info);

	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}
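
/*
 * [editor's note, not in the original] create the basic space_info
 * structures: system chunks, plus either a mixed metadata+data
 * space_info or separate metadata and data ones, depending on the
 * mixed-groups incompat flag.
 */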
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = &fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return 1;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	}
out:
	return ret;
}
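
/*
 * [editor's note, not in the original] thin wrappers so code outside
 * extent-tree.c, apparently the error handling paths given the names,
 * can unpin and discard extent ranges without reaching into the
 * static helpers here.
 */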
int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	return unpin_extent_range(root, start, end);
}

int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
			       u64 num_bytes, u64 *actual_bytes)
{
	return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
}
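
/*
 * [editor's note, not in the original] trim the free space of every
 * block group overlapping the requested range, waiting for any
 * in-progress caching so the free space is known, and return the total
 * bytes trimmed in range->len; presumably this backs the FITRIM ioctl.
 */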
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	u64 group_trimmed;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	int ret = 0;

	cache = btrfs_lookup_block_group(fs_info, range->start);

	while (cache) {
		if (cache->key.objectid >= (range->start + range->len)) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->key.objectid);
		end = min(range->start + range->len,
			  cache->key.objectid + cache->key.offset);

		if (end - start >= range->minlen) {
			if (!block_group_cache_done(cache)) {
				ret = cache_block_group(cache, NULL, root, 0);
				if (!ret)
					wait_block_group_cache_done(cache);
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				btrfs_put_block_group(cache);
				break;
			}
		}

		cache = next_block_group(fs_info->tree_root, cache);
	}

	range->len = trimmed;
	return ret;
}