xattr.c 186 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * xattr.c
 *
 * Copyright (C) 2004, 2008 Oracle.  All rights reserved.
 *
 * CREDITS:
 * Much of the code in this file was copied from linux/fs/ext3/xattr.c.
 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
  21. #include <linux/capability.h>
  22. #include <linux/fs.h>
  23. #include <linux/types.h>
  24. #include <linux/slab.h>
  25. #include <linux/highmem.h>
  26. #include <linux/pagemap.h>
  27. #include <linux/uio.h>
  28. #include <linux/sched.h>
  29. #include <linux/splice.h>
  30. #include <linux/mount.h>
  31. #include <linux/writeback.h>
  32. #include <linux/falloc.h>
  33. #include <linux/sort.h>
  34. #include <linux/init.h>
  35. #include <linux/module.h>
  36. #include <linux/string.h>
  37. #include <linux/security.h>
  38. #define MLOG_MASK_PREFIX ML_XATTR
  39. #include <cluster/masklog.h>
  40. #include "ocfs2.h"
  41. #include "alloc.h"
  42. #include "blockcheck.h"
  43. #include "dlmglue.h"
  44. #include "file.h"
  45. #include "symlink.h"
  46. #include "sysfile.h"
  47. #include "inode.h"
  48. #include "journal.h"
  49. #include "ocfs2_fs.h"
  50. #include "suballoc.h"
  51. #include "uptodate.h"
  52. #include "buffer_head_io.h"
  53. #include "super.h"
  54. #include "xattr.h"
  55. #include "refcounttree.h"
  56. #include "acl.h"
  57. struct ocfs2_xattr_def_value_root {
  58. struct ocfs2_xattr_value_root xv;
  59. struct ocfs2_extent_rec er;
  60. };
  61. struct ocfs2_xattr_bucket {
  62. /* The inode these xattrs are associated with */
  63. struct inode *bu_inode;
  64. /* The actual buffers that make up the bucket */
  65. struct buffer_head *bu_bhs[OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET];
  66. /* How many blocks make up one bucket for this filesystem */
  67. int bu_blocks;
  68. };
  69. struct ocfs2_xattr_set_ctxt {
  70. handle_t *handle;
  71. struct ocfs2_alloc_context *meta_ac;
  72. struct ocfs2_alloc_context *data_ac;
  73. struct ocfs2_cached_dealloc_ctxt dealloc;
  74. };
/* Byte overhead of the value root stored inline for a non-local value. */
#define OCFS2_XATTR_ROOT_SIZE	(sizeof(struct ocfs2_xattr_def_value_root))
/* Values up to this many bytes are stored inline next to the name. */
#define OCFS2_XATTR_INLINE_SIZE	80
/* Pad kept between the entry array and the name/value region. */
#define OCFS2_XATTR_HEADER_GAP	4
/* Free space available for xattrs inside the inode block itself. */
#define OCFS2_XATTR_FREE_IN_IBODY	(OCFS2_MIN_XATTR_INLINE_SIZE \
					 - sizeof(struct ocfs2_xattr_header) \
					 - OCFS2_XATTR_HEADER_GAP)
/* Free space available for xattrs in an external xattr block of @ptr. */
#define OCFS2_XATTR_FREE_IN_BLOCK(ptr)	((ptr)->i_sb->s_blocksize \
					 - sizeof(struct ocfs2_xattr_block) \
					 - sizeof(struct ocfs2_xattr_header) \
					 - OCFS2_XATTR_HEADER_GAP)

/* Template value root used when initializing an outside-stored value. */
static struct ocfs2_xattr_def_value_root def_xv = {
	.xv.xr_list.l_count = cpu_to_le16(1),
};

/* Handlers the VFS consults for xattr get/set/list on this fs. */
struct xattr_handler *ocfs2_xattr_handlers[] = {
	&ocfs2_xattr_user_handler,
	&ocfs2_xattr_acl_access_handler,
	&ocfs2_xattr_acl_default_handler,
	&ocfs2_xattr_trusted_handler,
	&ocfs2_xattr_security_handler,
	NULL
};

/* Maps on-disk name-index values to their handlers (prefix lookup). */
static struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
	[OCFS2_XATTR_INDEX_USER]	= &ocfs2_xattr_user_handler,
	[OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS]
					= &ocfs2_xattr_acl_access_handler,
	[OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT]
					= &ocfs2_xattr_acl_default_handler,
	[OCFS2_XATTR_INDEX_TRUSTED]	= &ocfs2_xattr_trusted_handler,
	[OCFS2_XATTR_INDEX_SECURITY]	= &ocfs2_xattr_security_handler,
};

/* Describes the xattr being set: namespace index, name and new value. */
struct ocfs2_xattr_info {
	int name_index;		/* OCFS2_XATTR_INDEX_* namespace */
	const char *name;	/* name without the namespace prefix */
	const void *value;
	size_t value_len;
};

/* Search state shared by the ibody/block/bucket lookup paths. */
struct ocfs2_xattr_search {
	struct buffer_head *inode_bh;
	/*
	 * xattr_bh point to the block buffer head which has extended attribute
	 * when extended attribute in inode, xattr_bh is equal to inode_bh.
	 */
	struct buffer_head *xattr_bh;
	struct ocfs2_xattr_header *header;	/* entry array being searched */
	struct ocfs2_xattr_bucket *bucket;	/* only used for indexed trees */
	void *base;		/* start of the name/value storage region */
	void *end;		/* first byte past the storage region */
	struct ocfs2_xattr_entry *here;		/* matching entry on success */
	int not_found;		/* -ENODATA until a match is found */
};
  125. static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
  126. struct ocfs2_xattr_header *xh,
  127. int index,
  128. int *block_off,
  129. int *new_offset);
  130. static int ocfs2_xattr_block_find(struct inode *inode,
  131. int name_index,
  132. const char *name,
  133. struct ocfs2_xattr_search *xs);
  134. static int ocfs2_xattr_index_block_find(struct inode *inode,
  135. struct buffer_head *root_bh,
  136. int name_index,
  137. const char *name,
  138. struct ocfs2_xattr_search *xs);
  139. static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
  140. struct buffer_head *blk_bh,
  141. char *buffer,
  142. size_t buffer_size);
  143. static int ocfs2_xattr_create_index_block(struct inode *inode,
  144. struct ocfs2_xattr_search *xs,
  145. struct ocfs2_xattr_set_ctxt *ctxt);
  146. static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
  147. struct ocfs2_xattr_info *xi,
  148. struct ocfs2_xattr_search *xs,
  149. struct ocfs2_xattr_set_ctxt *ctxt);
  150. typedef int (xattr_tree_rec_func)(struct inode *inode,
  151. struct buffer_head *root_bh,
  152. u64 blkno, u32 cpos, u32 len, void *para);
  153. static int ocfs2_iterate_xattr_index_block(struct inode *inode,
  154. struct buffer_head *root_bh,
  155. xattr_tree_rec_func *rec_func,
  156. void *para);
  157. static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
  158. struct ocfs2_xattr_bucket *bucket,
  159. void *para);
  160. static int ocfs2_rm_xattr_cluster(struct inode *inode,
  161. struct buffer_head *root_bh,
  162. u64 blkno,
  163. u32 cpos,
  164. u32 len,
  165. void *para);
  166. static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
  167. u64 src_blk, u64 last_blk, u64 to_blk,
  168. unsigned int start_bucket,
  169. u32 *first_hash);
  170. static int ocfs2_prepare_refcount_xattr(struct inode *inode,
  171. struct ocfs2_dinode *di,
  172. struct ocfs2_xattr_info *xi,
  173. struct ocfs2_xattr_search *xis,
  174. struct ocfs2_xattr_search *xbs,
  175. struct ocfs2_refcount_tree **ref_tree,
  176. int *meta_need,
  177. int *credits);
  178. static int ocfs2_get_xattr_tree_value_root(struct super_block *sb,
  179. struct ocfs2_xattr_bucket *bucket,
  180. int offset,
  181. struct ocfs2_xattr_value_root **xv,
  182. struct buffer_head **bh);
  183. static inline u16 ocfs2_xattr_buckets_per_cluster(struct ocfs2_super *osb)
  184. {
  185. return (1 << osb->s_clustersize_bits) / OCFS2_XATTR_BUCKET_SIZE;
  186. }
  187. static inline u16 ocfs2_blocks_per_xattr_bucket(struct super_block *sb)
  188. {
  189. return OCFS2_XATTR_BUCKET_SIZE / (1 << sb->s_blocksize_bits);
  190. }
  191. static inline u16 ocfs2_xattr_max_xe_in_bucket(struct super_block *sb)
  192. {
  193. u16 len = sb->s_blocksize -
  194. offsetof(struct ocfs2_xattr_header, xh_entries);
  195. return len / sizeof(struct ocfs2_xattr_entry);
  196. }
/* First block number of the bucket (bucket blocks are contiguous). */
#define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr)
/* Raw data pointer of the _n'th block in the bucket. */
#define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data)
/* The xattr header lives at the start of the bucket's first block. */
#define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0))
  200. static struct ocfs2_xattr_bucket *ocfs2_xattr_bucket_new(struct inode *inode)
  201. {
  202. struct ocfs2_xattr_bucket *bucket;
  203. int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
  204. BUG_ON(blks > OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET);
  205. bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS);
  206. if (bucket) {
  207. bucket->bu_inode = inode;
  208. bucket->bu_blocks = blks;
  209. }
  210. return bucket;
  211. }
  212. static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket)
  213. {
  214. int i;
  215. for (i = 0; i < bucket->bu_blocks; i++) {
  216. brelse(bucket->bu_bhs[i]);
  217. bucket->bu_bhs[i] = NULL;
  218. }
  219. }
  220. static void ocfs2_xattr_bucket_free(struct ocfs2_xattr_bucket *bucket)
  221. {
  222. if (bucket) {
  223. ocfs2_xattr_bucket_relse(bucket);
  224. bucket->bu_inode = NULL;
  225. kfree(bucket);
  226. }
  227. }
  228. /*
  229. * A bucket that has never been written to disk doesn't need to be
  230. * read. We just need the buffer_heads. Don't call this for
  231. * buckets that are already on disk. ocfs2_read_xattr_bucket() initializes
  232. * them fully.
  233. */
  234. static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
  235. u64 xb_blkno)
  236. {
  237. int i, rc = 0;
  238. for (i = 0; i < bucket->bu_blocks; i++) {
  239. bucket->bu_bhs[i] = sb_getblk(bucket->bu_inode->i_sb,
  240. xb_blkno + i);
  241. if (!bucket->bu_bhs[i]) {
  242. rc = -EIO;
  243. mlog_errno(rc);
  244. break;
  245. }
  246. if (!ocfs2_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
  247. bucket->bu_bhs[i]))
  248. ocfs2_set_new_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
  249. bucket->bu_bhs[i]);
  250. }
  251. if (rc)
  252. ocfs2_xattr_bucket_relse(bucket);
  253. return rc;
  254. }
/* Read the xattr bucket at xb_blkno */
static int ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
				   u64 xb_blkno)
{
	int rc;

	rc = ocfs2_read_blocks(INODE_CACHE(bucket->bu_inode), xb_blkno,
			       bucket->bu_blocks, bucket->bu_bhs, 0,
			       NULL);
	if (!rc) {
		/*
		 * NOTE(review): bucket ecc validation is serialized on the
		 * per-sb osb_xattr_lock, same as the compute side in
		 * ocfs2_xattr_bucket_journal_dirty().
		 */
		spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
		rc = ocfs2_validate_meta_ecc_bhs(bucket->bu_inode->i_sb,
						 bucket->bu_bhs,
						 bucket->bu_blocks,
						 &bucket_xh(bucket)->xh_check);
		spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
		if (rc)
			mlog_errno(rc);
	}

	/* Don't hold stale/invalid buffers on failure. */
	if (rc)
		ocfs2_xattr_bucket_relse(bucket);
	return rc;
}
  277. static int ocfs2_xattr_bucket_journal_access(handle_t *handle,
  278. struct ocfs2_xattr_bucket *bucket,
  279. int type)
  280. {
  281. int i, rc = 0;
  282. for (i = 0; i < bucket->bu_blocks; i++) {
  283. rc = ocfs2_journal_access(handle,
  284. INODE_CACHE(bucket->bu_inode),
  285. bucket->bu_bhs[i], type);
  286. if (rc) {
  287. mlog_errno(rc);
  288. break;
  289. }
  290. }
  291. return rc;
  292. }
/*
 * Recompute the bucket's metaecc check value, then mark every block of
 * the bucket dirty in the journal.
 */
static void ocfs2_xattr_bucket_journal_dirty(handle_t *handle,
					     struct ocfs2_xattr_bucket *bucket)
{
	int i;

	/* ecc update is serialized on the per-sb osb_xattr_lock. */
	spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
	ocfs2_compute_meta_ecc_bhs(bucket->bu_inode->i_sb,
				   bucket->bu_bhs, bucket->bu_blocks,
				   &bucket_xh(bucket)->xh_check);
	spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);

	for (i = 0; i < bucket->bu_blocks; i++)
		ocfs2_journal_dirty(handle, bucket->bu_bhs[i]);
}
  305. static void ocfs2_xattr_bucket_copy_data(struct ocfs2_xattr_bucket *dest,
  306. struct ocfs2_xattr_bucket *src)
  307. {
  308. int i;
  309. int blocksize = src->bu_inode->i_sb->s_blocksize;
  310. BUG_ON(dest->bu_blocks != src->bu_blocks);
  311. BUG_ON(dest->bu_inode != src->bu_inode);
  312. for (i = 0; i < src->bu_blocks; i++) {
  313. memcpy(bucket_block(dest, i), bucket_block(src, i),
  314. blocksize);
  315. }
  316. }
/*
 * Validate an xattr block freshly read from disk: check the metaecc
 * first, then the block signature, the self-referential block number,
 * and the filesystem generation.  Used as the validate callback of
 * ocfs2_read_xattr_block().
 */
static int ocfs2_validate_xattr_block(struct super_block *sb,
				      struct buffer_head *bh)
{
	int rc;
	struct ocfs2_xattr_block *xb =
		(struct ocfs2_xattr_block *)bh->b_data;

	mlog(0, "Validating xattr block %llu\n",
	     (unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running.  We know any error is
	 * local to this block.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &xb->xb_check);
	if (rc)
		return rc;

	/*
	 * Errors after here are fatal
	 */

	if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) {
		ocfs2_error(sb,
			    "Extended attribute block #%llu has bad "
			    "signature %.*s",
			    (unsigned long long)bh->b_blocknr, 7,
			    xb->xb_signature);
		return -EINVAL;
	}

	if (le64_to_cpu(xb->xb_blkno) != bh->b_blocknr) {
		ocfs2_error(sb,
			    "Extended attribute block #%llu has an "
			    "invalid xb_blkno of %llu",
			    (unsigned long long)bh->b_blocknr,
			    (unsigned long long)le64_to_cpu(xb->xb_blkno));
		return -EINVAL;
	}

	if (le32_to_cpu(xb->xb_fs_generation) != OCFS2_SB(sb)->fs_generation) {
		ocfs2_error(sb,
			    "Extended attribute block #%llu has an invalid "
			    "xb_fs_generation of #%u",
			    (unsigned long long)bh->b_blocknr,
			    le32_to_cpu(xb->xb_fs_generation));
		return -EINVAL;
	}

	return 0;
}
  363. static int ocfs2_read_xattr_block(struct inode *inode, u64 xb_blkno,
  364. struct buffer_head **bh)
  365. {
  366. int rc;
  367. struct buffer_head *tmp = *bh;
  368. rc = ocfs2_read_block(INODE_CACHE(inode), xb_blkno, &tmp,
  369. ocfs2_validate_xattr_block);
  370. /* If ocfs2_read_block() got us a new bh, pass it up. */
  371. if (!rc && !*bh)
  372. *bh = tmp;
  373. return rc;
  374. }
  375. static inline const char *ocfs2_xattr_prefix(int name_index)
  376. {
  377. struct xattr_handler *handler = NULL;
  378. if (name_index > 0 && name_index < OCFS2_XATTR_MAX)
  379. handler = ocfs2_xattr_handler_map[name_index];
  380. return handler ? handler->prefix : NULL;
  381. }
  382. static u32 ocfs2_xattr_name_hash(struct inode *inode,
  383. const char *name,
  384. int name_len)
  385. {
  386. /* Get hash value of uuid from super block */
  387. u32 hash = OCFS2_SB(inode->i_sb)->uuid_hash;
  388. int i;
  389. /* hash extended attribute name */
  390. for (i = 0; i < name_len; i++) {
  391. hash = (hash << OCFS2_HASH_SHIFT) ^
  392. (hash >> (8*sizeof(hash) - OCFS2_HASH_SHIFT)) ^
  393. *name++;
  394. }
  395. return hash;
  396. }
  397. /*
  398. * ocfs2_xattr_hash_entry()
  399. *
  400. * Compute the hash of an extended attribute.
  401. */
  402. static void ocfs2_xattr_hash_entry(struct inode *inode,
  403. struct ocfs2_xattr_header *header,
  404. struct ocfs2_xattr_entry *entry)
  405. {
  406. u32 hash = 0;
  407. char *name = (char *)header + le16_to_cpu(entry->xe_name_offset);
  408. hash = ocfs2_xattr_name_hash(inode, name, entry->xe_name_len);
  409. entry->xe_name_hash = cpu_to_le32(hash);
  410. return;
  411. }
  412. static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len)
  413. {
  414. int size = 0;
  415. if (value_len <= OCFS2_XATTR_INLINE_SIZE)
  416. size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
  417. else
  418. size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
  419. size += sizeof(struct ocfs2_xattr_entry);
  420. return size;
  421. }
/*
 * Reserve the metadata and clusters needed to create the security xattr
 * of a new inode: may reserve one metadata block into *xattr_ac for an
 * external xattr block, and adds to *want_clusters / *xattr_credits for
 * an outside-stored value.
 */
int ocfs2_calc_security_init(struct inode *dir,
			     struct ocfs2_security_xattr_info *si,
			     int *want_clusters,
			     int *xattr_credits,
			     struct ocfs2_alloc_context **xattr_ac)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
	int s_size = ocfs2_xattr_entry_real_size(strlen(si->name),
						 si->value_len);

	/*
	 * The max space of security xattr taken inline is
	 * 256(name) + 80(value) + 16(entry) = 352 bytes,
	 * So reserve one metadata block for it is ok.
	 */
	if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE ||
	    s_size > OCFS2_XATTR_FREE_IN_IBODY) {
		ret = ocfs2_reserve_new_metadata_blocks(osb, 1, xattr_ac);
		if (ret) {
			mlog_errno(ret);
			return ret;
		}
		*xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
	}

	/* reserve clusters for xattr value which will be set in B tree*/
	if (si->value_len > OCFS2_XATTR_INLINE_SIZE) {
		int new_clusters = ocfs2_clusters_for_bytes(dir->i_sb,
							    si->value_len);

		*xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
							   new_clusters);
		*want_clusters += new_clusters;
	}
	return ret;
}
  456. int ocfs2_calc_xattr_init(struct inode *dir,
  457. struct buffer_head *dir_bh,
  458. int mode,
  459. struct ocfs2_security_xattr_info *si,
  460. int *want_clusters,
  461. int *xattr_credits,
  462. int *want_meta)
  463. {
  464. int ret = 0;
  465. struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
  466. int s_size = 0, a_size = 0, acl_len = 0, new_clusters;
  467. if (si->enable)
  468. s_size = ocfs2_xattr_entry_real_size(strlen(si->name),
  469. si->value_len);
  470. if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
  471. acl_len = ocfs2_xattr_get_nolock(dir, dir_bh,
  472. OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT,
  473. "", NULL, 0);
  474. if (acl_len > 0) {
  475. a_size = ocfs2_xattr_entry_real_size(0, acl_len);
  476. if (S_ISDIR(mode))
  477. a_size <<= 1;
  478. } else if (acl_len != 0 && acl_len != -ENODATA) {
  479. mlog_errno(ret);
  480. return ret;
  481. }
  482. }
  483. if (!(s_size + a_size))
  484. return ret;
  485. /*
  486. * The max space of security xattr taken inline is
  487. * 256(name) + 80(value) + 16(entry) = 352 bytes,
  488. * The max space of acl xattr taken inline is
  489. * 80(value) + 16(entry) * 2(if directory) = 192 bytes,
  490. * when blocksize = 512, may reserve one more cluser for
  491. * xattr bucket, otherwise reserve one metadata block
  492. * for them is ok.
  493. * If this is a new directory with inline data,
  494. * we choose to reserve the entire inline area for
  495. * directory contents and force an external xattr block.
  496. */
  497. if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE ||
  498. (S_ISDIR(mode) && ocfs2_supports_inline_data(osb)) ||
  499. (s_size + a_size) > OCFS2_XATTR_FREE_IN_IBODY) {
  500. *want_meta = *want_meta + 1;
  501. *xattr_credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
  502. }
  503. if (dir->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE &&
  504. (s_size + a_size) > OCFS2_XATTR_FREE_IN_BLOCK(dir)) {
  505. *want_clusters += 1;
  506. *xattr_credits += ocfs2_blocks_per_xattr_bucket(dir->i_sb);
  507. }
  508. /*
  509. * reserve credits and clusters for xattrs which has large value
  510. * and have to be set outside
  511. */
  512. if (si->enable && si->value_len > OCFS2_XATTR_INLINE_SIZE) {
  513. new_clusters = ocfs2_clusters_for_bytes(dir->i_sb,
  514. si->value_len);
  515. *xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
  516. new_clusters);
  517. *want_clusters += new_clusters;
  518. }
  519. if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL &&
  520. acl_len > OCFS2_XATTR_INLINE_SIZE) {
  521. /* for directory, it has DEFAULT and ACCESS two types of acls */
  522. new_clusters = (S_ISDIR(mode) ? 2 : 1) *
  523. ocfs2_clusters_for_bytes(dir->i_sb, acl_len);
  524. *xattr_credits += ocfs2_clusters_to_blocks(dir->i_sb,
  525. new_clusters);
  526. *want_clusters += new_clusters;
  527. }
  528. return ret;
  529. }
/*
 * Grow an outside-stored xattr value tree by @clusters_to_add clusters.
 * The transaction and allocator contexts come from @ctxt; the caller is
 * expected to have reserved enough space up front, so a restart request
 * from the btree code is treated as a bug.
 */
static int ocfs2_xattr_extend_allocation(struct inode *inode,
					 u32 clusters_to_add,
					 struct ocfs2_xattr_value_buf *vb,
					 struct ocfs2_xattr_set_ctxt *ctxt)
{
	int status = 0;
	handle_t *handle = ctxt->handle;
	enum ocfs2_alloc_restarted why;
	u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters);
	struct ocfs2_extent_tree et;

	mlog(0, "(clusters_to_add for xattr= %u)\n", clusters_to_add);

	ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);

	status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
			      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
	status = ocfs2_add_clusters_in_btree(handle,
					     &et,
					     &logical_start,
					     clusters_to_add,
					     0,
					     ctxt->data_ac,
					     ctxt->meta_ac,
					     &why);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	status = ocfs2_journal_dirty(handle, vb->vb_bh);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	/* How many of the requested clusters were actually added? */
	clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) - prev_clusters;

	/*
	 * We should have already allocated enough space before the transaction,
	 * so no need to restart.
	 */
	BUG_ON(why != RESTART_NONE || clusters_to_add);

leave:
	return status;
}
/*
 * Remove @len clusters at logical offset @cpos from an xattr value tree
 * and release the physical clusters at @phys_cpos — either by dropping a
 * refcount (shared extents) or by queueing them for dealloc.  Runs
 * inside @ctxt's transaction.
 */
static int __ocfs2_remove_xattr_range(struct inode *inode,
				      struct ocfs2_xattr_value_buf *vb,
				      u32 cpos, u32 phys_cpos, u32 len,
				      unsigned int ext_flags,
				      struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret;
	u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
	handle_t *handle = ctxt->handle;
	struct ocfs2_extent_tree et;

	ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);

	ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
			    OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac,
				  &ctxt->dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Shrink the recorded value length by the removed clusters. */
	le32_add_cpu(&vb->vb_xv->xr_clusters, -len);

	ret = ocfs2_journal_dirty(handle, vb->vb_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (ext_flags & OCFS2_EXT_REFCOUNTED)
		/* Shared clusters: drop a reference instead of freeing. */
		ret = ocfs2_decrease_refcount(inode, handle,
					ocfs2_blocks_to_clusters(inode->i_sb,
								 phys_blkno),
					len, ctxt->meta_ac, &ctxt->dealloc, 1);
	else
		ret = ocfs2_cache_cluster_dealloc(&ctxt->dealloc,
						  phys_blkno, len);
	if (ret)
		mlog_errno(ret);

out:
	return ret;
}
/*
 * Shrink an outside-stored xattr value from @old_clusters down to
 * @new_clusters, removing one contiguous extent run per iteration and
 * evicting the freed blocks from the uptodate cache.
 */
static int ocfs2_xattr_shrink_size(struct inode *inode,
				   u32 old_clusters,
				   u32 new_clusters,
				   struct ocfs2_xattr_value_buf *vb,
				   struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret = 0;
	unsigned int ext_flags;
	u32 trunc_len, cpos, phys_cpos, alloc_size;
	u64 block;

	if (old_clusters <= new_clusters)
		return 0;

	cpos = new_clusters;
	trunc_len = old_clusters - new_clusters;
	while (trunc_len) {
		ret = ocfs2_xattr_get_clusters(inode, cpos, &phys_cpos,
					       &alloc_size,
					       &vb->vb_xv->xr_list, &ext_flags);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/* Don't remove more than what still needs truncating. */
		if (alloc_size > trunc_len)
			alloc_size = trunc_len;

		ret = __ocfs2_remove_xattr_range(inode, vb, cpos,
						 phys_cpos, alloc_size,
						 ext_flags, ctxt);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		block = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
		ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode),
						       block, alloc_size);
		cpos += alloc_size;
		trunc_len -= alloc_size;
	}
out:
	return ret;
}
  657. static int ocfs2_xattr_value_truncate(struct inode *inode,
  658. struct ocfs2_xattr_value_buf *vb,
  659. int len,
  660. struct ocfs2_xattr_set_ctxt *ctxt)
  661. {
  662. int ret;
  663. u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, len);
  664. u32 old_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
  665. if (new_clusters == old_clusters)
  666. return 0;
  667. if (new_clusters > old_clusters)
  668. ret = ocfs2_xattr_extend_allocation(inode,
  669. new_clusters - old_clusters,
  670. vb, ctxt);
  671. else
  672. ret = ocfs2_xattr_shrink_size(inode,
  673. old_clusters, new_clusters,
  674. vb, ctxt);
  675. return ret;
  676. }
  677. static int ocfs2_xattr_list_entry(char *buffer, size_t size,
  678. size_t *result, const char *prefix,
  679. const char *name, int name_len)
  680. {
  681. char *p = buffer + *result;
  682. int prefix_len = strlen(prefix);
  683. int total_len = prefix_len + name_len + 1;
  684. *result += total_len;
  685. /* we are just looking for how big our buffer needs to be */
  686. if (!size)
  687. return 0;
  688. if (*result > size)
  689. return -ERANGE;
  690. memcpy(p, prefix, prefix_len);
  691. memcpy(p + prefix_len, name, name_len);
  692. p[prefix_len + name_len] = '\0';
  693. return 0;
  694. }
/*
 * Emit "<prefix><name>" for every entry in @header whose namespace has a
 * registered handler.  Returns the total bytes written (or needed, when
 * @buffer_size is 0), or -ERANGE propagated from ocfs2_xattr_list_entry().
 */
static int ocfs2_xattr_list_entries(struct inode *inode,
				    struct ocfs2_xattr_header *header,
				    char *buffer, size_t buffer_size)
{
	size_t result = 0;
	int i, type, ret;
	const char *prefix, *name;

	for (i = 0 ; i < le16_to_cpu(header->xh_count); i++) {
		struct ocfs2_xattr_entry *entry = &header->xh_entries[i];
		type = ocfs2_xattr_get_type(entry);
		prefix = ocfs2_xattr_prefix(type);

		/* Entries with an unknown namespace are silently skipped. */
		if (prefix) {
			name = (const char *)header +
				le16_to_cpu(entry->xe_name_offset);

			ret = ocfs2_xattr_list_entry(buffer, buffer_size,
						     &result, prefix, name,
						     entry->xe_name_len);
			if (ret)
				return ret;
		}
	}

	return result;
}
  718. int ocfs2_has_inline_xattr_value_outside(struct inode *inode,
  719. struct ocfs2_dinode *di)
  720. {
  721. struct ocfs2_xattr_header *xh;
  722. int i;
  723. xh = (struct ocfs2_xattr_header *)
  724. ((void *)di + inode->i_sb->s_blocksize -
  725. le16_to_cpu(di->i_xattr_inline_size));
  726. for (i = 0; i < le16_to_cpu(xh->xh_count); i++)
  727. if (!ocfs2_xattr_is_local(&xh->xh_entries[i]))
  728. return 1;
  729. return 0;
  730. }
  731. static int ocfs2_xattr_ibody_list(struct inode *inode,
  732. struct ocfs2_dinode *di,
  733. char *buffer,
  734. size_t buffer_size)
  735. {
  736. struct ocfs2_xattr_header *header = NULL;
  737. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  738. int ret = 0;
  739. if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL))
  740. return ret;
  741. header = (struct ocfs2_xattr_header *)
  742. ((void *)di + inode->i_sb->s_blocksize -
  743. le16_to_cpu(di->i_xattr_inline_size));
  744. ret = ocfs2_xattr_list_entries(inode, header, buffer, buffer_size);
  745. return ret;
  746. }
/*
 * List the names of the xattrs stored in the inode's external xattr
 * block, handling both the flat (single header) and indexed-tree forms.
 * Returns bytes emitted or a negative error.
 */
static int ocfs2_xattr_block_list(struct inode *inode,
				  struct ocfs2_dinode *di,
				  char *buffer,
				  size_t buffer_size)
{
	struct buffer_head *blk_bh = NULL;
	struct ocfs2_xattr_block *xb;
	int ret = 0;

	/* No external xattr block: nothing to list. */
	if (!di->i_xattr_loc)
		return ret;

	ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
				     &blk_bh);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
	if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
		struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header;
		ret = ocfs2_xattr_list_entries(inode, header,
					       buffer, buffer_size);
	} else
		ret = ocfs2_xattr_tree_list_index_block(inode, blk_bh,
							buffer, buffer_size);

	brelse(blk_bh);

	return ret;
}
/*
 * ocfs2_listxattr()
 *
 * VFS entry point: concatenate the names of the inode's inline and
 * block-stored xattrs into @buffer.  With a NULL @buffer the return
 * value is the buffer size needed.
 */
ssize_t ocfs2_listxattr(struct dentry *dentry,
			char *buffer,
			size_t size)
{
	int ret = 0, i_ret = 0, b_ret = 0;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(dentry->d_inode);

	if (!ocfs2_supports_xattr(OCFS2_SB(dentry->d_sb)))
		return -EOPNOTSUPP;

	/* No xattrs on this inode: an empty list. */
	if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
		return ret;

	ret = ocfs2_inode_lock(dentry->d_inode, &di_bh, 0);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;

	down_read(&oi->ip_xattr_sem);
	i_ret = ocfs2_xattr_ibody_list(dentry->d_inode, di, buffer, size);
	if (i_ret < 0)
		b_ret = 0;
	else {
		if (buffer) {
			/* Continue filling after the inline names. */
			buffer += i_ret;
			size -= i_ret;
		}
		b_ret = ocfs2_xattr_block_list(dentry->d_inode, di,
					       buffer, size);
		if (b_ret < 0)
			i_ret = 0;
	}
	up_read(&oi->ip_xattr_sem);
	ocfs2_inode_unlock(dentry->d_inode, 0);

	brelse(di_bh);

	/* A failure in either pass zeroed its counterpart above. */
	return i_ret + b_ret;
}
/*
 * Linear scan, starting at xs->here, for the entry matching
 * (name_index, name).  On a match, 0 is returned with xs->here pointing
 * at the entry; otherwise xs->here is left just past the last entry
 * scanned and -ENODATA is returned.
 *
 * NOTE(review): the loop trusts the on-disk xh_count and does not bound
 * entries against xs->end — confirm counts are validated when read.
 */
static int ocfs2_xattr_find_entry(int name_index,
				  const char *name,
				  struct ocfs2_xattr_search *xs)
{
	struct ocfs2_xattr_entry *entry;
	size_t name_len;
	int i, cmp = 1;

	if (name == NULL)
		return -EINVAL;

	name_len = strlen(name);
	entry = xs->here;
	for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
		/* Compare namespace index, then length, then the bytes. */
		cmp = name_index - ocfs2_xattr_get_type(entry);
		if (!cmp)
			cmp = name_len - entry->xe_name_len;
		if (!cmp)
			cmp = memcmp(name, (xs->base +
				     le16_to_cpu(entry->xe_name_offset)),
				     name_len);
		if (cmp == 0)
			break;
		entry += 1;
	}
	xs->here = entry;

	return cmp ? -ENODATA : 0;
}
/*
 * Copy up to @len bytes of an xattr value stored in its own extent tree
 * into @buffer, walking the value's extent list one contiguous cluster
 * run at a time and reading each block through the uptodate cache.
 */
static int ocfs2_xattr_get_value_outside(struct inode *inode,
					 struct ocfs2_xattr_value_root *xv,
					 void *buffer,
					 size_t len)
{
	u32 cpos, p_cluster, num_clusters, bpc, clusters;
	u64 blkno;
	int i, ret = 0;
	size_t cplen, blocksize;
	struct buffer_head *bh = NULL;
	struct ocfs2_extent_list *el;

	el = &xv->xr_list;
	clusters = le32_to_cpu(xv->xr_clusters);
	bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
	blocksize = inode->i_sb->s_blocksize;

	cpos = 0;
	while (cpos < clusters) {
		ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
					       &num_clusters, el, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
		/* Copy ocfs2_xattr_value */
		for (i = 0; i < num_clusters * bpc; i++, blkno++) {
			ret = ocfs2_read_block(INODE_CACHE(inode), blkno,
					      &bh, NULL);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/* Last block may be partially consumed. */
			cplen = len >= blocksize ? blocksize : len;
			memcpy(buffer, bh->b_data, cplen);
			len -= cplen;
			buffer += cplen;

			brelse(bh);
			bh = NULL;
			/* Done once the whole value has been copied. */
			if (len == 0)
				break;
		}
		cpos += num_clusters;
	}
out:
	return ret;
}
/*
 * Look up (name_index, name) among the xattrs stored inline in the inode
 * block and, if @buffer is non-NULL, copy the value out (following the
 * external value tree when the value is not local).  Returns the value
 * size, -ENODATA if absent, or -ERANGE if @buffer is too small.
 */
static int ocfs2_xattr_ibody_get(struct inode *inode,
				 int name_index,
				 const char *name,
				 void *buffer,
				 size_t buffer_size,
				 struct ocfs2_xattr_search *xs)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
	struct ocfs2_xattr_value_root *xv;
	size_t size;
	int ret = 0;

	if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL))
		return -ENODATA;

	/* The inline xattr header sits at the tail of the inode block. */
	xs->end = (void *)di + inode->i_sb->s_blocksize;
	xs->header = (struct ocfs2_xattr_header *)
		(xs->end - le16_to_cpu(di->i_xattr_inline_size));
	xs->base = (void *)xs->header;
	xs->here = xs->header->xh_entries;

	ret = ocfs2_xattr_find_entry(name_index, name, xs);
	if (ret)
		return ret;

	size = le64_to_cpu(xs->here->xe_value_size);
	if (buffer) {
		if (size > buffer_size)
			return -ERANGE;
		if (ocfs2_xattr_is_local(xs->here)) {
			/* Local value is stored right after the padded name. */
			memcpy(buffer, (void *)xs->base +
			       le16_to_cpu(xs->here->xe_name_offset) +
			       OCFS2_XATTR_SIZE(xs->here->xe_name_len), size);
		} else {
			/* Non-local: a value root follows the name instead. */
			xv = (struct ocfs2_xattr_value_root *)
				(xs->base + le16_to_cpu(
				 xs->here->xe_name_offset) +
				OCFS2_XATTR_SIZE(xs->here->xe_name_len));
			ret = ocfs2_xattr_get_value_outside(inode, xv,
							    buffer, size);
			if (ret < 0) {
				mlog_errno(ret);
				return ret;
			}
		}
	}

	return size;
}
  928. static int ocfs2_xattr_block_get(struct inode *inode,
  929. int name_index,
  930. const char *name,
  931. void *buffer,
  932. size_t buffer_size,
  933. struct ocfs2_xattr_search *xs)
  934. {
  935. struct ocfs2_xattr_block *xb;
  936. struct ocfs2_xattr_value_root *xv;
  937. size_t size;
  938. int ret = -ENODATA, name_offset, name_len, i;
  939. int uninitialized_var(block_off);
  940. xs->bucket = ocfs2_xattr_bucket_new(inode);
  941. if (!xs->bucket) {
  942. ret = -ENOMEM;
  943. mlog_errno(ret);
  944. goto cleanup;
  945. }
  946. ret = ocfs2_xattr_block_find(inode, name_index, name, xs);
  947. if (ret) {
  948. mlog_errno(ret);
  949. goto cleanup;
  950. }
  951. if (xs->not_found) {
  952. ret = -ENODATA;
  953. goto cleanup;
  954. }
  955. xb = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
  956. size = le64_to_cpu(xs->here->xe_value_size);
  957. if (buffer) {
  958. ret = -ERANGE;
  959. if (size > buffer_size)
  960. goto cleanup;
  961. name_offset = le16_to_cpu(xs->here->xe_name_offset);
  962. name_len = OCFS2_XATTR_SIZE(xs->here->xe_name_len);
  963. i = xs->here - xs->header->xh_entries;
  964. if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
  965. ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
  966. bucket_xh(xs->bucket),
  967. i,
  968. &block_off,
  969. &name_offset);
  970. xs->base = bucket_block(xs->bucket, block_off);
  971. }
  972. if (ocfs2_xattr_is_local(xs->here)) {
  973. memcpy(buffer, (void *)xs->base +
  974. name_offset + name_len, size);
  975. } else {
  976. xv = (struct ocfs2_xattr_value_root *)
  977. (xs->base + name_offset + name_len);
  978. ret = ocfs2_xattr_get_value_outside(inode, xv,
  979. buffer, size);
  980. if (ret < 0) {
  981. mlog_errno(ret);
  982. goto cleanup;
  983. }
  984. }
  985. }
  986. ret = size;
  987. cleanup:
  988. ocfs2_xattr_bucket_free(xs->bucket);
  989. brelse(xs->xattr_bh);
  990. xs->xattr_bh = NULL;
  991. return ret;
  992. }
  993. int ocfs2_xattr_get_nolock(struct inode *inode,
  994. struct buffer_head *di_bh,
  995. int name_index,
  996. const char *name,
  997. void *buffer,
  998. size_t buffer_size)
  999. {
  1000. int ret;
  1001. struct ocfs2_dinode *di = NULL;
  1002. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  1003. struct ocfs2_xattr_search xis = {
  1004. .not_found = -ENODATA,
  1005. };
  1006. struct ocfs2_xattr_search xbs = {
  1007. .not_found = -ENODATA,
  1008. };
  1009. if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
  1010. return -EOPNOTSUPP;
  1011. if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
  1012. ret = -ENODATA;
  1013. xis.inode_bh = xbs.inode_bh = di_bh;
  1014. di = (struct ocfs2_dinode *)di_bh->b_data;
  1015. down_read(&oi->ip_xattr_sem);
  1016. ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer,
  1017. buffer_size, &xis);
  1018. if (ret == -ENODATA && di->i_xattr_loc)
  1019. ret = ocfs2_xattr_block_get(inode, name_index, name, buffer,
  1020. buffer_size, &xbs);
  1021. up_read(&oi->ip_xattr_sem);
  1022. return ret;
  1023. }
  1024. /* ocfs2_xattr_get()
  1025. *
  1026. * Copy an extended attribute into the buffer provided.
  1027. * Buffer is NULL to compute the size of buffer required.
  1028. */
  1029. static int ocfs2_xattr_get(struct inode *inode,
  1030. int name_index,
  1031. const char *name,
  1032. void *buffer,
  1033. size_t buffer_size)
  1034. {
  1035. int ret;
  1036. struct buffer_head *di_bh = NULL;
  1037. ret = ocfs2_inode_lock(inode, &di_bh, 0);
  1038. if (ret < 0) {
  1039. mlog_errno(ret);
  1040. return ret;
  1041. }
  1042. ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
  1043. name, buffer, buffer_size);
  1044. ocfs2_inode_unlock(inode, 0);
  1045. brelse(di_bh);
  1046. return ret;
  1047. }
/*
 * Overwrite the clusters backing an outside-stored xattr value with
 * @value, zero-padding the tail of the last block written.  The value
 * tree must already be sized to hold @value_len bytes; all writes go
 * through @handle.
 */
static int __ocfs2_xattr_set_value_outside(struct inode *inode,
					   handle_t *handle,
					   struct ocfs2_xattr_value_buf *vb,
					   const void *value,
					   int value_len)
{
	int ret = 0, i, cp_len;
	u16 blocksize = inode->i_sb->s_blocksize;
	u32 p_cluster, num_clusters;
	u32 cpos = 0, bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
	u32 clusters = ocfs2_clusters_for_bytes(inode->i_sb, value_len);
	u64 blkno;
	struct buffer_head *bh = NULL;
	unsigned int ext_flags;
	struct ocfs2_xattr_value_root *xv = vb->vb_xv;

	BUG_ON(clusters > le32_to_cpu(xv->xr_clusters));

	while (cpos < clusters) {
		ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
					       &num_clusters, &xv->xr_list,
					       &ext_flags);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/* Refcounted (shared) extents must have been CoWed by now. */
		BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);

		blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);

		for (i = 0; i < num_clusters * bpc; i++, blkno++) {
			ret = ocfs2_read_block(INODE_CACHE(inode), blkno,
					      &bh, NULL);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			ret = ocfs2_journal_access(handle,
						   INODE_CACHE(inode),
						   bh,
						   OCFS2_JOURNAL_ACCESS_WRITE);
			if (ret < 0) {
				mlog_errno(ret);
				goto out;
			}

			cp_len = value_len > blocksize ? blocksize : value_len;
			memcpy(bh->b_data, value, cp_len);
			value_len -= cp_len;
			value += cp_len;
			/* Zero the tail of a partially filled block. */
			if (cp_len < blocksize)
				memset(bh->b_data + cp_len, 0,
				       blocksize - cp_len);

			ret = ocfs2_journal_dirty(handle, bh);
			if (ret < 0) {
				mlog_errno(ret);
				goto out;
			}
			brelse(bh);
			bh = NULL;

			/*
			 * XXX: do we need to empty all the following
			 * blocks in this cluster?
			 */
			if (!value_len)
				break;
		}
		cpos += num_clusters;
	}
out:
	brelse(bh);

	return ret;
}
/*
 * ocfs2_xattr_cleanup()
 *
 * Undo a partially-installed xattr: clear the entry at xs->here and the
 * name + value-tree-root region at xs->base + offs, and drop the header's
 * entry count.  Called when writing the outside value failed after the
 * tree root had already been written locally (see ocfs2_xattr_set_entry()).
 */
static int ocfs2_xattr_cleanup(struct inode *inode,
			       handle_t *handle,
			       struct ocfs2_xattr_info *xi,
			       struct ocfs2_xattr_search *xs,
			       struct ocfs2_xattr_value_buf *vb,
			       size_t offs)
{
	int ret = 0;
	size_t name_len = strlen(xi->name);
	void *val = xs->base + offs;
	/* Name + tree root is exactly what the failed set wrote locally. */
	size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;

	ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
			    OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}
	/* Decrease xattr count */
	le16_add_cpu(&xs->header->xh_count, -1);
	/* Remove the xattr entry and tree root which have already been set */
	memset((void *)xs->here, 0, sizeof(struct ocfs2_xattr_entry));
	memset(val, 0, size);

	ret = ocfs2_journal_dirty(handle, vb->vb_bh);
	if (ret < 0)
		mlog_errno(ret);
out:
	return ret;
}
  1144. static int ocfs2_xattr_update_entry(struct inode *inode,
  1145. handle_t *handle,
  1146. struct ocfs2_xattr_info *xi,
  1147. struct ocfs2_xattr_search *xs,
  1148. struct ocfs2_xattr_value_buf *vb,
  1149. size_t offs)
  1150. {
  1151. int ret;
  1152. ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
  1153. OCFS2_JOURNAL_ACCESS_WRITE);
  1154. if (ret) {
  1155. mlog_errno(ret);
  1156. goto out;
  1157. }
  1158. xs->here->xe_name_offset = cpu_to_le16(offs);
  1159. xs->here->xe_value_size = cpu_to_le64(xi->value_len);
  1160. if (xi->value_len <= OCFS2_XATTR_INLINE_SIZE)
  1161. ocfs2_xattr_set_local(xs->here, 1);
  1162. else
  1163. ocfs2_xattr_set_local(xs->here, 0);
  1164. ocfs2_xattr_hash_entry(inode, xs->header, xs->here);
  1165. ret = ocfs2_journal_dirty(handle, vb->vb_bh);
  1166. if (ret < 0)
  1167. mlog_errno(ret);
  1168. out:
  1169. return ret;
  1170. }
/*
 * ocfs2_xattr_set_value_outside()
 *
 * Set large size value in B tree.
 *
 * Writes name + an empty ocfs2_xattr_value_root at xs->base + offs,
 * sizes the value tree to xi->value_len, updates the entry, then copies
 * the value bytes into the allocated clusters.
 */
static int ocfs2_xattr_set_value_outside(struct inode *inode,
					 struct ocfs2_xattr_info *xi,
					 struct ocfs2_xattr_search *xs,
					 struct ocfs2_xattr_set_ctxt *ctxt,
					 struct ocfs2_xattr_value_buf *vb,
					 size_t offs)
{
	size_t name_len = strlen(xi->name);
	void *val = xs->base + offs;
	struct ocfs2_xattr_value_root *xv = NULL;
	size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
	int ret = 0;

	/* Lay down the name followed by an empty value tree root. */
	memset(val, 0, size);
	memcpy(val, xi->name, name_len);
	xv = (struct ocfs2_xattr_value_root *)
		(val + OCFS2_XATTR_SIZE(name_len));
	xv->xr_clusters = 0;
	xv->xr_last_eb_blk = 0;
	xv->xr_list.l_tree_depth = 0;
	xv->xr_list.l_count = cpu_to_le16(1);
	xv->xr_list.l_next_free_rec = 0;
	vb->vb_xv = xv;

	/* Grow (or shrink) the value tree to hold value_len bytes. */
	ret = ocfs2_xattr_value_truncate(inode, vb, xi->value_len, ctxt);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	/* Point the entry at the new name/root and rehash it. */
	ret = ocfs2_xattr_update_entry(inode, ctxt->handle, xi, xs, vb, offs);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	/* Finally copy the value bytes into the allocated clusters. */
	ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, vb,
					      xi->value, xi->value_len);
	if (ret < 0)
		mlog_errno(ret);

	return ret;
}
/*
 * ocfs2_xattr_set_entry_local()
 *
 * Set, replace or remove extended attribute in local.
 *
 * The xattr area is laid out with the entry table growing down from the
 * header and the packed name+value blobs growing up from the end of the
 * area; min_offs is the lowest name offset currently in use (the top of
 * the free gap).  @last points one past the last entry.  The caller must
 * have verified there is enough space; this function cannot fail.
 */
static void ocfs2_xattr_set_entry_local(struct inode *inode,
					struct ocfs2_xattr_info *xi,
					struct ocfs2_xattr_search *xs,
					struct ocfs2_xattr_entry *last,
					size_t min_offs)
{
	size_t name_len = strlen(xi->name);
	int i;

	if (xi->value && xs->not_found) {
		/* Insert the new xattr entry. */
		le16_add_cpu(&xs->header->xh_count, 1);
		ocfs2_xattr_set_type(last, xi->name_index);
		ocfs2_xattr_set_local(last, 1);
		last->xe_name_len = name_len;
	} else {
		/* Replacing or removing an existing attribute. */
		void *first_val;
		void *val;
		size_t offs, size;

		first_val = xs->base + min_offs;
		offs = le16_to_cpu(xs->here->xe_name_offset);
		val = xs->base + offs;

		/* Size of the old name+value blob: inline value or root. */
		if (le64_to_cpu(xs->here->xe_value_size) >
		    OCFS2_XATTR_INLINE_SIZE)
			size = OCFS2_XATTR_SIZE(name_len) +
				OCFS2_XATTR_ROOT_SIZE;
		else
			size = OCFS2_XATTR_SIZE(name_len) +
				OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size));

		if (xi->value && size == OCFS2_XATTR_SIZE(name_len) +
				OCFS2_XATTR_SIZE(xi->value_len)) {
			/* The old and the new value have the
			   same size. Just replace the value. */
			ocfs2_xattr_set_local(xs->here, 1);
			xs->here->xe_value_size = cpu_to_le64(xi->value_len);
			/* Clear value bytes. */
			memset(val + OCFS2_XATTR_SIZE(name_len),
			       0,
			       OCFS2_XATTR_SIZE(xi->value_len));
			memcpy(val + OCFS2_XATTR_SIZE(name_len),
			       xi->value,
			       xi->value_len);
			return;
		}
		/*
		 * Remove the old name+value: slide everything between the
		 * top of the packed region and the old blob up by its size.
		 */
		memmove(first_val + size, first_val, val - first_val);
		memset(first_val, 0, size);
		xs->here->xe_name_hash = 0;
		xs->here->xe_name_offset = 0;
		ocfs2_xattr_set_local(xs->here, 1);
		xs->here->xe_value_size = 0;

		min_offs += size;

		/* Adjust all value offsets. */
		last = xs->header->xh_entries;
		for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) {
			size_t o = le16_to_cpu(last->xe_name_offset);
			/* Blobs below the removed one moved up by 'size'. */
			if (o < offs)
				last->xe_name_offset = cpu_to_le16(o + size);
			last += 1;
		}

		if (!xi->value) {
			/* Remove the old entry. */
			last -= 1;
			memmove(xs->here, xs->here + 1,
				(void *)last - (void *)xs->here);
			memset(last, 0, sizeof(struct ocfs2_xattr_entry));
			le16_add_cpu(&xs->header->xh_count, -1);
		}
	}
	if (xi->value) {
		/* Insert the new name+value. */
		size_t size = OCFS2_XATTR_SIZE(name_len) +
			      OCFS2_XATTR_SIZE(xi->value_len);
		/* Place the blob just below the current packed region. */
		void *val = xs->base + min_offs - size;

		xs->here->xe_name_offset = cpu_to_le16(min_offs - size);
		memset(val, 0, size);
		memcpy(val, xi->name, name_len);
		memcpy(val + OCFS2_XATTR_SIZE(name_len),
		       xi->value,
		       xi->value_len);
		xs->here->xe_value_size = cpu_to_le64(xi->value_len);
		ocfs2_xattr_set_local(xs->here, 1);
		ocfs2_xattr_hash_entry(inode, xs->header, xs->here);
	}

	return;
}
/*
 * ocfs2_xattr_set_entry()
 *
 * Set extended attribute entry into inode or block.
 *
 * If extended attribute value size > OCFS2_XATTR_INLINE_SIZE,
 * We first insert tree root(ocfs2_xattr_value_root) with set_entry_local(),
 * then set value in B tree with set_value_outside().
 *
 * @flag carries OCFS2_INLINE_XATTR_FL when the target area is inside the
 * inode block (xs->xattr_bh == xs->inode_bh), otherwise the target is an
 * external xattr block.
 */
static int ocfs2_xattr_set_entry(struct inode *inode,
				 struct ocfs2_xattr_info *xi,
				 struct ocfs2_xattr_search *xs,
				 struct ocfs2_xattr_set_ctxt *ctxt,
				 int flag)
{
	struct ocfs2_xattr_entry *last;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
	size_t min_offs = xs->end - xs->base, name_len = strlen(xi->name);
	size_t size_l = 0;
	handle_t *handle = ctxt->handle;
	int free, i, ret;
	/*
	 * xi_l is the "local" version of the request: for large values it
	 * is redirected below to insert a default value root instead.
	 */
	struct ocfs2_xattr_info xi_l = {
		.name_index = xi->name_index,
		.name = xi->name,
		.value = xi->value,
		.value_len = xi->value_len,
	};
	struct ocfs2_xattr_value_buf vb = {
		.vb_bh = xs->xattr_bh,
		.vb_access = ocfs2_journal_access_di,
	};

	/* Pick the journal-access helper matching the target buffer. */
	if (!(flag & OCFS2_INLINE_XATTR_FL)) {
		BUG_ON(xs->xattr_bh == xs->inode_bh);
		vb.vb_access = ocfs2_journal_access_xb;
	} else
		BUG_ON(xs->xattr_bh != xs->inode_bh);

	/* Compute min_offs, last and free space. */
	last = xs->header->xh_entries;
	for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) {
		size_t offs = le16_to_cpu(last->xe_name_offset);
		if (offs < min_offs)
			min_offs = offs;
		last += 1;
	}

	free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP;
	if (free < 0)
		return -EIO;

	if (!xs->not_found) {
		/* The old entry's space is reclaimable; count it as free. */
		size_t size = 0;
		if (ocfs2_xattr_is_local(xs->here))
			size = OCFS2_XATTR_SIZE(name_len) +
			OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size));
		else
			size = OCFS2_XATTR_SIZE(name_len) +
				OCFS2_XATTR_ROOT_SIZE;
		free += (size + sizeof(struct ocfs2_xattr_entry));
	}
	/* Check free space in inode or block */
	if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
		/* Large value: only the tree root is stored locally. */
		if (free < sizeof(struct ocfs2_xattr_entry) +
			   OCFS2_XATTR_SIZE(name_len) +
			   OCFS2_XATTR_ROOT_SIZE) {
			ret = -ENOSPC;
			goto out;
		}
		size_l = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
		xi_l.value = (void *)&def_xv;
		xi_l.value_len = OCFS2_XATTR_ROOT_SIZE;
	} else if (xi->value) {
		if (free < sizeof(struct ocfs2_xattr_entry) +
			   OCFS2_XATTR_SIZE(name_len) +
			   OCFS2_XATTR_SIZE(xi->value_len)) {
			ret = -ENOSPC;
			goto out;
		}
	}

	if (!xs->not_found) {
		/* For existing extended attribute */
		size_t size = OCFS2_XATTR_SIZE(name_len) +
			OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size));
		size_t offs = le16_to_cpu(xs->here->xe_name_offset);
		void *val = xs->base + offs;

		if (ocfs2_xattr_is_local(xs->here) && size == size_l) {
			/* Replace existing local xattr with tree root */
			ret = ocfs2_xattr_set_value_outside(inode, xi, xs,
							    ctxt, &vb, offs);
			if (ret < 0)
				mlog_errno(ret);
			goto out;
		} else if (!ocfs2_xattr_is_local(xs->here)) {
			/* For existing xattr which has value outside */
			vb.vb_xv = (struct ocfs2_xattr_value_root *)
				(val + OCFS2_XATTR_SIZE(name_len));

			if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
				/*
				 * If new value need set outside also,
				 * first truncate old value to new value,
				 * then set new value with set_value_outside().
				 */
				ret = ocfs2_xattr_value_truncate(inode,
								 &vb,
								 xi->value_len,
								 ctxt);
				if (ret < 0) {
					mlog_errno(ret);
					goto out;
				}

				ret = ocfs2_xattr_update_entry(inode,
							       handle,
							       xi,
							       xs,
							       &vb,
							       offs);
				if (ret < 0) {
					mlog_errno(ret);
					goto out;
				}

				ret = __ocfs2_xattr_set_value_outside(inode,
								handle,
								&vb,
								xi->value,
								xi->value_len);
				if (ret < 0)
					mlog_errno(ret);
				goto out;
			} else {
				/*
				 * If new value need set in local,
				 * just trucate old value to zero.
				 */
				ret = ocfs2_xattr_value_truncate(inode,
								 &vb,
								 0,
								 ctxt);
				if (ret < 0)
					mlog_errno(ret);
			}
		}
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), xs->inode_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (!(flag & OCFS2_INLINE_XATTR_FL)) {
		/* External block: journal the xattr buffer too. */
		ret = vb.vb_access(handle, INODE_CACHE(inode), vb.vb_bh,
				   OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Set value in local, include set tree root in local.
	 * This is the first step for value size >INLINE_SIZE.
	 */
	ocfs2_xattr_set_entry_local(inode, &xi_l, xs, last, min_offs);

	if (!(flag & OCFS2_INLINE_XATTR_FL)) {
		ret = ocfs2_journal_dirty(handle, xs->xattr_bh);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) &&
	    (flag & OCFS2_INLINE_XATTR_FL)) {
		/* First inline xattr on this inode: carve out the area. */
		struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
		unsigned int xattrsize = osb->s_xattr_inline_size;

		/*
		 * Adjust extent record count or inline data size
		 * to reserve space for extended attribute.
		 */
		if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
			struct ocfs2_inline_data *idata = &di->id2.i_data;
			le16_add_cpu(&idata->id_count, -xattrsize);
		} else if (!(ocfs2_inode_is_fast_symlink(inode))) {
			struct ocfs2_extent_list *el = &di->id2.i_list;
			le16_add_cpu(&el->l_count, -(xattrsize /
					sizeof(struct ocfs2_extent_rec)));
		}
		di->i_xattr_inline_size = cpu_to_le16(xattrsize);
	}
	/* Update xattr flag */
	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features |= flag;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	spin_unlock(&oi->ip_lock);

	ret = ocfs2_journal_dirty(handle, xs->inode_bh);
	if (ret < 0)
		mlog_errno(ret);

	if (!ret && xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
		/*
		 * Set value outside in B tree.
		 * This is the second step for value size > INLINE_SIZE.
		 */
		size_t offs = le16_to_cpu(xs->here->xe_name_offset);
		ret = ocfs2_xattr_set_value_outside(inode, xi, xs, ctxt,
						    &vb, offs);
		if (ret < 0) {
			int ret2;

			mlog_errno(ret);
			/*
			 * If set value outside failed, we have to clean
			 * the junk tree root we have already set in local.
			 */
			ret2 = ocfs2_xattr_cleanup(inode, ctxt->handle,
						   xi, xs, &vb, offs);
			if (ret2 < 0)
				mlog_errno(ret2);
		}
	}
out:
	return ret;
}
  1520. /*
  1521. * In xattr remove, if it is stored outside and refcounted, we may have
  1522. * the chance to split the refcount tree. So need the allocators.
  1523. */
  1524. static int ocfs2_lock_xattr_remove_allocators(struct inode *inode,
  1525. struct ocfs2_xattr_value_root *xv,
  1526. struct ocfs2_caching_info *ref_ci,
  1527. struct buffer_head *ref_root_bh,
  1528. struct ocfs2_alloc_context **meta_ac,
  1529. int *ref_credits)
  1530. {
  1531. int ret, meta_add = 0;
  1532. u32 p_cluster, num_clusters;
  1533. unsigned int ext_flags;
  1534. *ref_credits = 0;
  1535. ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster,
  1536. &num_clusters,
  1537. &xv->xr_list,
  1538. &ext_flags);
  1539. if (ret) {
  1540. mlog_errno(ret);
  1541. goto out;
  1542. }
  1543. if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
  1544. goto out;
  1545. ret = ocfs2_refcounted_xattr_delete_need(inode, ref_ci,
  1546. ref_root_bh, xv,
  1547. &meta_add, ref_credits);
  1548. if (ret) {
  1549. mlog_errno(ret);
  1550. goto out;
  1551. }
  1552. ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
  1553. meta_add, meta_ac);
  1554. if (ret)
  1555. mlog_errno(ret);
  1556. out:
  1557. return ret;
  1558. }
  1559. static int ocfs2_remove_value_outside(struct inode*inode,
  1560. struct ocfs2_xattr_value_buf *vb,
  1561. struct ocfs2_xattr_header *header,
  1562. struct ocfs2_caching_info *ref_ci,
  1563. struct buffer_head *ref_root_bh)
  1564. {
  1565. int ret = 0, i, ref_credits;
  1566. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  1567. struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, };
  1568. void *val;
  1569. ocfs2_init_dealloc_ctxt(&ctxt.dealloc);
  1570. for (i = 0; i < le16_to_cpu(header->xh_count); i++) {
  1571. struct ocfs2_xattr_entry *entry = &header->xh_entries[i];
  1572. if (ocfs2_xattr_is_local(entry))
  1573. continue;
  1574. val = (void *)header +
  1575. le16_to_cpu(entry->xe_name_offset);
  1576. vb->vb_xv = (struct ocfs2_xattr_value_root *)
  1577. (val + OCFS2_XATTR_SIZE(entry->xe_name_len));
  1578. ret = ocfs2_lock_xattr_remove_allocators(inode, vb->vb_xv,
  1579. ref_ci, ref_root_bh,
  1580. &ctxt.meta_ac,
  1581. &ref_credits);
  1582. ctxt.handle = ocfs2_start_trans(osb, ref_credits +
  1583. ocfs2_remove_extent_credits(osb->sb));
  1584. if (IS_ERR(ctxt.handle)) {
  1585. ret = PTR_ERR(ctxt.handle);
  1586. mlog_errno(ret);
  1587. break;
  1588. }
  1589. ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);
  1590. if (ret < 0) {
  1591. mlog_errno(ret);
  1592. break;
  1593. }
  1594. ocfs2_commit_trans(osb, ctxt.handle);
  1595. if (ctxt.meta_ac) {
  1596. ocfs2_free_alloc_context(ctxt.meta_ac);
  1597. ctxt.meta_ac = NULL;
  1598. }
  1599. }
  1600. if (ctxt.meta_ac)
  1601. ocfs2_free_alloc_context(ctxt.meta_ac);
  1602. ocfs2_schedule_truncate_log_flush(osb, 1);
  1603. ocfs2_run_deallocs(osb, &ctxt.dealloc);
  1604. return ret;
  1605. }
  1606. static int ocfs2_xattr_ibody_remove(struct inode *inode,
  1607. struct buffer_head *di_bh,
  1608. struct ocfs2_caching_info *ref_ci,
  1609. struct buffer_head *ref_root_bh)
  1610. {
  1611. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  1612. struct ocfs2_xattr_header *header;
  1613. int ret;
  1614. struct ocfs2_xattr_value_buf vb = {
  1615. .vb_bh = di_bh,
  1616. .vb_access = ocfs2_journal_access_di,
  1617. };
  1618. header = (struct ocfs2_xattr_header *)
  1619. ((void *)di + inode->i_sb->s_blocksize -
  1620. le16_to_cpu(di->i_xattr_inline_size));
  1621. ret = ocfs2_remove_value_outside(inode, &vb, header,
  1622. ref_ci, ref_root_bh);
  1623. return ret;
  1624. }
/*
 * Callback context for ocfs2_iterate_xattr_index_block(): carries the
 * refcount-tree cache and root buffer through to ocfs2_rm_xattr_cluster.
 */
struct ocfs2_rm_xattr_bucket_para {
	struct ocfs2_caching_info *ref_ci;
	struct buffer_head *ref_root_bh;
};
  1629. static int ocfs2_xattr_block_remove(struct inode *inode,
  1630. struct buffer_head *blk_bh,
  1631. struct ocfs2_caching_info *ref_ci,
  1632. struct buffer_head *ref_root_bh)
  1633. {
  1634. struct ocfs2_xattr_block *xb;
  1635. int ret = 0;
  1636. struct ocfs2_xattr_value_buf vb = {
  1637. .vb_bh = blk_bh,
  1638. .vb_access = ocfs2_journal_access_xb,
  1639. };
  1640. struct ocfs2_rm_xattr_bucket_para args = {
  1641. .ref_ci = ref_ci,
  1642. .ref_root_bh = ref_root_bh,
  1643. };
  1644. xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
  1645. if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
  1646. struct ocfs2_xattr_header *header = &(xb->xb_attrs.xb_header);
  1647. ret = ocfs2_remove_value_outside(inode, &vb, header,
  1648. ref_ci, ref_root_bh);
  1649. } else
  1650. ret = ocfs2_iterate_xattr_index_block(inode,
  1651. blk_bh,
  1652. ocfs2_rm_xattr_cluster,
  1653. &args);
  1654. return ret;
  1655. }
/*
 * ocfs2_xattr_free_block()
 *
 * Remove every xattr stored in the external block at @block, then give
 * the block itself back to the extent allocator it came from.
 */
static int ocfs2_xattr_free_block(struct inode *inode,
				  u64 block,
				  struct ocfs2_caching_info *ref_ci,
				  struct buffer_head *ref_root_bh)
{
	struct inode *xb_alloc_inode;
	struct buffer_head *xb_alloc_bh = NULL;
	struct buffer_head *blk_bh = NULL;
	struct ocfs2_xattr_block *xb;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle;
	int ret = 0;
	u64 blk, bg_blkno;
	u16 bit;

	ret = ocfs2_read_xattr_block(inode, block, &blk_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/* Tear down the entries (and any outside values) first. */
	ret = ocfs2_xattr_block_remove(inode, blk_bh, ref_ci, ref_root_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/* Locate the suballocator slot/group the block came from. */
	xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
	blk = le64_to_cpu(xb->xb_blkno);
	bit = le16_to_cpu(xb->xb_suballoc_bit);
	bg_blkno = ocfs2_which_suballoc_group(blk, bit);

	xb_alloc_inode = ocfs2_get_system_file_inode(osb,
				EXTENT_ALLOC_SYSTEM_INODE,
				le16_to_cpu(xb->xb_suballoc_slot));
	if (!xb_alloc_inode) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	mutex_lock(&xb_alloc_inode->i_mutex);

	ret = ocfs2_inode_lock(xb_alloc_inode, &xb_alloc_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock;
	}

	/* Return the single block's bit to its group. */
	ret = ocfs2_free_suballoc_bits(handle, xb_alloc_inode, xb_alloc_bh,
				       bit, bg_blkno, 1);
	if (ret < 0)
		mlog_errno(ret);

	ocfs2_commit_trans(osb, handle);
out_unlock:
	ocfs2_inode_unlock(xb_alloc_inode, 1);
	brelse(xb_alloc_bh);
out_mutex:
	mutex_unlock(&xb_alloc_inode->i_mutex);
	iput(xb_alloc_inode);
out:
	brelse(blk_bh);
	return ret;
}
/*
 * ocfs2_xattr_remove()
 *
 * Free extended attribute resources associated with this inode.
 *
 * Removes inline xattrs, frees the external xattr block if one exists,
 * then clears the xattr flags and i_xattr_loc in the dinode.
 */
int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_refcount_tree *ref_tree = NULL;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_caching_info *ref_ci = NULL;
	handle_t *handle;
	int ret;

	if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
		return 0;

	if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
		return 0;

	/*
	 * Values may be refcounted (reflinked); take the refcount tree
	 * lock so outside values can be released safely.
	 */
	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {
		ret = ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb),
					       le64_to_cpu(di->i_refcount_loc),
					       1, &ref_tree, &ref_root_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		ref_ci = &ref_tree->rf_ci;
	}

	/* Drop the inline xattrs stored in the inode block itself. */
	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
		ret = ocfs2_xattr_ibody_remove(inode, di_bh,
					       ref_ci, ref_root_bh);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* Drop the external xattr block, if any. */
	if (di->i_xattr_loc) {
		ret = ocfs2_xattr_free_block(inode,
					     le64_to_cpu(di->i_xattr_loc),
					     ref_ci, ref_root_bh);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* Finally clear the xattr state recorded in the dinode. */
	handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)),
				   OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	di->i_xattr_loc = 0;

	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features &= ~(OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL);
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	spin_unlock(&oi->ip_lock);

	ret = ocfs2_journal_dirty(handle, di_bh);
	if (ret < 0)
		mlog_errno(ret);
out_commit:
	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
	if (ref_tree)
		ocfs2_unlock_refcount_tree(OCFS2_SB(inode->i_sb), ref_tree, 1);
	brelse(ref_root_bh);
	return ret;
}
  1793. static int ocfs2_xattr_has_space_inline(struct inode *inode,
  1794. struct ocfs2_dinode *di)
  1795. {
  1796. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  1797. unsigned int xattrsize = OCFS2_SB(inode->i_sb)->s_xattr_inline_size;
  1798. int free;
  1799. if (xattrsize < OCFS2_MIN_XATTR_INLINE_SIZE)
  1800. return 0;
  1801. if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
  1802. struct ocfs2_inline_data *idata = &di->id2.i_data;
  1803. free = le16_to_cpu(idata->id_count) - le64_to_cpu(di->i_size);
  1804. } else if (ocfs2_inode_is_fast_symlink(inode)) {
  1805. free = ocfs2_fast_symlink_chars(inode->i_sb) -
  1806. le64_to_cpu(di->i_size);
  1807. } else {
  1808. struct ocfs2_extent_list *el = &di->id2.i_list;
  1809. free = (le16_to_cpu(el->l_count) -
  1810. le16_to_cpu(el->l_next_free_rec)) *
  1811. sizeof(struct ocfs2_extent_rec);
  1812. }
  1813. if (free >= xattrsize)
  1814. return 1;
  1815. return 0;
  1816. }
/*
 * ocfs2_xattr_ibody_find()
 *
 * Find extended attribute in inode block and
 * fill search info into struct ocfs2_xattr_search.
 */
static int ocfs2_xattr_ibody_find(struct inode *inode,
				  int name_index,
				  const char *name,
				  struct ocfs2_xattr_search *xs)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
	int ret;
	int has_space = 0;

	/* Minimum-sized blocks carry no inline xattr area at all. */
	if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE)
		return 0;

	if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
		/* No inline area yet; bail out if one cannot be made. */
		down_read(&oi->ip_alloc_sem);
		has_space = ocfs2_xattr_has_space_inline(inode, di);
		up_read(&oi->ip_alloc_sem);
		if (!has_space)
			return 0;
	}

	xs->xattr_bh = xs->inode_bh;
	xs->end = (void *)di + inode->i_sb->s_blocksize;

	/*
	 * The inline area sits at the tail of the inode block: use the
	 * recorded size when it exists, else the superblock default.
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)
		xs->header = (struct ocfs2_xattr_header *)
			(xs->end - le16_to_cpu(di->i_xattr_inline_size));
	else
		xs->header = (struct ocfs2_xattr_header *)
			(xs->end - OCFS2_SB(inode->i_sb)->s_xattr_inline_size);
	xs->base = (void *)xs->header;
	xs->here = xs->header->xh_entries;

	/* Find the named attribute. */
	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
		ret = ocfs2_xattr_find_entry(name_index, name, xs);
		if (ret && ret != -ENODATA)
			return ret;
		xs->not_found = ret;
	}

	return 0;
}
/*
 * ocfs2_xattr_ibody_set()
 *
 * Set, replace or remove an extended attribute into inode block.
 *
 */
static int ocfs2_xattr_ibody_set(struct inode *inode,
				 struct ocfs2_xattr_info *xi,
				 struct ocfs2_xattr_search *xs,
				 struct ocfs2_xattr_set_ctxt *ctxt)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
	int ret;

	/* Minimum-sized blocks cannot hold an inline xattr area. */
	if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE)
		return -ENOSPC;

	down_write(&oi->ip_alloc_sem);
	if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
		/* No inline area yet - make sure there is room for one. */
		if (!ocfs2_xattr_has_space_inline(inode, di)) {
			ret = -ENOSPC;
			goto out;
		}
	}

	ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt,
				(OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL));
out:
	up_write(&oi->ip_alloc_sem);

	return ret;
}
/*
 * ocfs2_xattr_block_find()
 *
 * Find extended attribute in external block and
 * fill search info into struct ocfs2_xattr_search.
 */
static int ocfs2_xattr_block_find(struct inode *inode,
				  int name_index,
				  const char *name,
				  struct ocfs2_xattr_search *xs)
{
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
	struct buffer_head *blk_bh = NULL;
	struct ocfs2_xattr_block *xb;
	int ret = 0;

	/* No external xattr block attached to this inode. */
	if (!di->i_xattr_loc)
		return ret;

	ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
				     &blk_bh);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	xs->xattr_bh = blk_bh;
	xb = (struct ocfs2_xattr_block *)blk_bh->b_data;

	if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
		/* Flat block: entries live directly in the block header. */
		xs->header = &xb->xb_attrs.xb_header;
		xs->base = (void *)xs->header;
		xs->end = (void *)(blk_bh->b_data) + blk_bh->b_size;
		xs->here = xs->header->xh_entries;

		ret = ocfs2_xattr_find_entry(name_index, name, xs);
	} else
		/* Indexed block: search the hash-indexed bucket tree. */
		ret = ocfs2_xattr_index_block_find(inode, blk_bh,
						   name_index,
						   name, xs);

	if (ret && ret != -ENODATA) {
		xs->xattr_bh = NULL;
		goto cleanup;
	}
	xs->not_found = ret;
	return 0;
cleanup:
	brelse(blk_bh);

	return ret;
}
  1934. static int ocfs2_create_xattr_block(handle_t *handle,
  1935. struct inode *inode,
  1936. struct buffer_head *inode_bh,
  1937. struct ocfs2_alloc_context *meta_ac,
  1938. struct buffer_head **ret_bh,
  1939. int indexed)
  1940. {
  1941. int ret;
  1942. u16 suballoc_bit_start;
  1943. u32 num_got;
  1944. u64 first_blkno;
  1945. struct ocfs2_dinode *di = (struct ocfs2_dinode *)inode_bh->b_data;
  1946. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  1947. struct buffer_head *new_bh = NULL;
  1948. struct ocfs2_xattr_block *xblk;
  1949. ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), inode_bh,
  1950. OCFS2_JOURNAL_ACCESS_CREATE);
  1951. if (ret < 0) {
  1952. mlog_errno(ret);
  1953. goto end;
  1954. }
  1955. ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1,
  1956. &suballoc_bit_start, &num_got,
  1957. &first_blkno);
  1958. if (ret < 0) {
  1959. mlog_errno(ret);
  1960. goto end;
  1961. }
  1962. new_bh = sb_getblk(inode->i_sb, first_blkno);
  1963. ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
  1964. ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode),
  1965. new_bh,
  1966. OCFS2_JOURNAL_ACCESS_CREATE);
  1967. if (ret < 0) {
  1968. mlog_errno(ret);
  1969. goto end;
  1970. }
  1971. /* Initialize ocfs2_xattr_block */
  1972. xblk = (struct ocfs2_xattr_block *)new_bh->b_data;
  1973. memset(xblk, 0, inode->i_sb->s_blocksize);
  1974. strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE);
  1975. xblk->xb_suballoc_slot = cpu_to_le16(osb->slot_num);
  1976. xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start);
  1977. xblk->xb_fs_generation = cpu_to_le32(osb->fs_generation);
  1978. xblk->xb_blkno = cpu_to_le64(first_blkno);
  1979. if (indexed) {
  1980. struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root;
  1981. xr->xt_clusters = cpu_to_le32(1);
  1982. xr->xt_last_eb_blk = 0;
  1983. xr->xt_list.l_tree_depth = 0;
  1984. xr->xt_list.l_count = cpu_to_le16(
  1985. ocfs2_xattr_recs_per_xb(inode->i_sb));
  1986. xr->xt_list.l_next_free_rec = cpu_to_le16(1);
  1987. xblk->xb_flags = cpu_to_le16(OCFS2_XATTR_INDEXED);
  1988. }
  1989. ret = ocfs2_journal_dirty(handle, new_bh);
  1990. if (ret < 0) {
  1991. mlog_errno(ret);
  1992. goto end;
  1993. }
  1994. di->i_xattr_loc = cpu_to_le64(first_blkno);
  1995. ocfs2_journal_dirty(handle, inode_bh);
  1996. *ret_bh = new_bh;
  1997. new_bh = NULL;
  1998. end:
  1999. brelse(new_bh);
  2000. return ret;
  2001. }
/*
 * ocfs2_xattr_block_set()
 *
 * Set, replace or remove an extended attribute in the external xattr
 * block, allocating that block first if the inode does not have one
 * yet.  Runs inside the caller's transaction (ctxt->handle).
 * Returns 0 on success or a negative errno.
 */
static int ocfs2_xattr_block_set(struct inode *inode,
				 struct ocfs2_xattr_info *xi,
				 struct ocfs2_xattr_search *xs,
				 struct ocfs2_xattr_set_ctxt *ctxt)
{
	struct buffer_head *new_bh = NULL;
	handle_t *handle = ctxt->handle;
	struct ocfs2_xattr_block *xblk = NULL;
	int ret;

	if (!xs->xattr_bh) {
		/* No xattr block yet: create an unindexed one and point
		 * the search cursor at its in-block header. */
		ret = ocfs2_create_xattr_block(handle, inode, xs->inode_bh,
					       ctxt->meta_ac, &new_bh, 0);
		if (ret) {
			mlog_errno(ret);
			goto end;
		}

		xs->xattr_bh = new_bh;
		xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
		xs->header = &xblk->xb_attrs.xb_header;
		xs->base = (void *)xs->header;
		xs->end = (void *)xblk + inode->i_sb->s_blocksize;
		xs->here = xs->header->xh_entries;
	} else
		xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;

	if (!(le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)) {
		/* Set extended attribute into external block */
		ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt,
					    OCFS2_HAS_XATTR_FL);
		/* Done unless the flat block ran out of space (-ENOSPC):
		 * then convert it to an indexed bucket tree and retry via
		 * the index path below. */
		if (!ret || ret != -ENOSPC)
			goto end;

		ret = ocfs2_xattr_create_index_block(inode, xs, ctxt);
		if (ret)
			goto end;
	}

	ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt);

end:
	return ret;
}
  2046. /* Check whether the new xattr can be inserted into the inode. */
  2047. static int ocfs2_xattr_can_be_in_inode(struct inode *inode,
  2048. struct ocfs2_xattr_info *xi,
  2049. struct ocfs2_xattr_search *xs)
  2050. {
  2051. u64 value_size;
  2052. struct ocfs2_xattr_entry *last;
  2053. int free, i;
  2054. size_t min_offs = xs->end - xs->base;
  2055. if (!xs->header)
  2056. return 0;
  2057. last = xs->header->xh_entries;
  2058. for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
  2059. size_t offs = le16_to_cpu(last->xe_name_offset);
  2060. if (offs < min_offs)
  2061. min_offs = offs;
  2062. last += 1;
  2063. }
  2064. free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP;
  2065. if (free < 0)
  2066. return 0;
  2067. BUG_ON(!xs->not_found);
  2068. if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
  2069. value_size = OCFS2_XATTR_ROOT_SIZE;
  2070. else
  2071. value_size = OCFS2_XATTR_SIZE(xi->value_len);
  2072. if (free >= sizeof(struct ocfs2_xattr_entry) +
  2073. OCFS2_XATTR_SIZE(strlen(xi->name)) + value_size)
  2074. return 1;
  2075. return 0;
  2076. }
  2077. static int ocfs2_calc_xattr_set_need(struct inode *inode,
  2078. struct ocfs2_dinode *di,
  2079. struct ocfs2_xattr_info *xi,
  2080. struct ocfs2_xattr_search *xis,
  2081. struct ocfs2_xattr_search *xbs,
  2082. int *clusters_need,
  2083. int *meta_need,
  2084. int *credits_need)
  2085. {
  2086. int ret = 0, old_in_xb = 0;
  2087. int clusters_add = 0, meta_add = 0, credits = 0;
  2088. struct buffer_head *bh = NULL;
  2089. struct ocfs2_xattr_block *xb = NULL;
  2090. struct ocfs2_xattr_entry *xe = NULL;
  2091. struct ocfs2_xattr_value_root *xv = NULL;
  2092. char *base = NULL;
  2093. int name_offset, name_len = 0;
  2094. u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
  2095. xi->value_len);
  2096. u64 value_size;
  2097. /*
  2098. * Calculate the clusters we need to write.
  2099. * No matter whether we replace an old one or add a new one,
  2100. * we need this for writing.
  2101. */
  2102. if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
  2103. credits += new_clusters *
  2104. ocfs2_clusters_to_blocks(inode->i_sb, 1);
  2105. if (xis->not_found && xbs->not_found) {
  2106. credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
  2107. if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
  2108. clusters_add += new_clusters;
  2109. credits += ocfs2_calc_extend_credits(inode->i_sb,
  2110. &def_xv.xv.xr_list,
  2111. new_clusters);
  2112. }
  2113. goto meta_guess;
  2114. }
  2115. if (!xis->not_found) {
  2116. xe = xis->here;
  2117. name_offset = le16_to_cpu(xe->xe_name_offset);
  2118. name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
  2119. base = xis->base;
  2120. credits += OCFS2_INODE_UPDATE_CREDITS;
  2121. } else {
  2122. int i, block_off = 0;
  2123. xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
  2124. xe = xbs->here;
  2125. name_offset = le16_to_cpu(xe->xe_name_offset);
  2126. name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
  2127. i = xbs->here - xbs->header->xh_entries;
  2128. old_in_xb = 1;
  2129. if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
  2130. ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
  2131. bucket_xh(xbs->bucket),
  2132. i, &block_off,
  2133. &name_offset);
  2134. base = bucket_block(xbs->bucket, block_off);
  2135. credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
  2136. } else {
  2137. base = xbs->base;
  2138. credits += OCFS2_XATTR_BLOCK_UPDATE_CREDITS;
  2139. }
  2140. }
  2141. /*
  2142. * delete a xattr doesn't need metadata and cluster allocation.
  2143. * so just calculate the credits and return.
  2144. *
  2145. * The credits for removing the value tree will be extended
  2146. * by ocfs2_remove_extent itself.
  2147. */
  2148. if (!xi->value) {
  2149. if (!ocfs2_xattr_is_local(xe))
  2150. credits += ocfs2_remove_extent_credits(inode->i_sb);
  2151. goto out;
  2152. }
  2153. /* do cluster allocation guess first. */
  2154. value_size = le64_to_cpu(xe->xe_value_size);
  2155. if (old_in_xb) {
  2156. /*
  2157. * In xattr set, we always try to set the xe in inode first,
  2158. * so if it can be inserted into inode successfully, the old
  2159. * one will be removed from the xattr block, and this xattr
  2160. * will be inserted into inode as a new xattr in inode.
  2161. */
  2162. if (ocfs2_xattr_can_be_in_inode(inode, xi, xis)) {
  2163. clusters_add += new_clusters;
  2164. credits += ocfs2_remove_extent_credits(inode->i_sb) +
  2165. OCFS2_INODE_UPDATE_CREDITS;
  2166. if (!ocfs2_xattr_is_local(xe))
  2167. credits += ocfs2_calc_extend_credits(
  2168. inode->i_sb,
  2169. &def_xv.xv.xr_list,
  2170. new_clusters);
  2171. goto out;
  2172. }
  2173. }
  2174. if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
  2175. /* the new values will be stored outside. */
  2176. u32 old_clusters = 0;
  2177. if (!ocfs2_xattr_is_local(xe)) {
  2178. old_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
  2179. value_size);
  2180. xv = (struct ocfs2_xattr_value_root *)
  2181. (base + name_offset + name_len);
  2182. value_size = OCFS2_XATTR_ROOT_SIZE;
  2183. } else
  2184. xv = &def_xv.xv;
  2185. if (old_clusters >= new_clusters) {
  2186. credits += ocfs2_remove_extent_credits(inode->i_sb);
  2187. goto out;
  2188. } else {
  2189. meta_add += ocfs2_extend_meta_needed(&xv->xr_list);
  2190. clusters_add += new_clusters - old_clusters;
  2191. credits += ocfs2_calc_extend_credits(inode->i_sb,
  2192. &xv->xr_list,
  2193. new_clusters -
  2194. old_clusters);
  2195. if (value_size >= OCFS2_XATTR_ROOT_SIZE)
  2196. goto out;
  2197. }
  2198. } else {
  2199. /*
  2200. * Now the new value will be stored inside. So if the new
  2201. * value is smaller than the size of value root or the old
  2202. * value, we don't need any allocation, otherwise we have
  2203. * to guess metadata allocation.
  2204. */
  2205. if ((ocfs2_xattr_is_local(xe) && value_size >= xi->value_len) ||
  2206. (!ocfs2_xattr_is_local(xe) &&
  2207. OCFS2_XATTR_ROOT_SIZE >= xi->value_len))
  2208. goto out;
  2209. }
  2210. meta_guess:
  2211. /* calculate metadata allocation. */
  2212. if (di->i_xattr_loc) {
  2213. if (!xbs->xattr_bh) {
  2214. ret = ocfs2_read_xattr_block(inode,
  2215. le64_to_cpu(di->i_xattr_loc),
  2216. &bh);
  2217. if (ret) {
  2218. mlog_errno(ret);
  2219. goto out;
  2220. }
  2221. xb = (struct ocfs2_xattr_block *)bh->b_data;
  2222. } else
  2223. xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
  2224. /*
  2225. * If there is already an xattr tree, good, we can calculate
  2226. * like other b-trees. Otherwise we may have the chance of
  2227. * create a tree, the credit calculation is borrowed from
  2228. * ocfs2_calc_extend_credits with root_el = NULL. And the
  2229. * new tree will be cluster based, so no meta is needed.
  2230. */
  2231. if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
  2232. struct ocfs2_extent_list *el =
  2233. &xb->xb_attrs.xb_root.xt_list;
  2234. meta_add += ocfs2_extend_meta_needed(el);
  2235. credits += ocfs2_calc_extend_credits(inode->i_sb,
  2236. el, 1);
  2237. } else
  2238. credits += OCFS2_SUBALLOC_ALLOC + 1;
  2239. /*
  2240. * This cluster will be used either for new bucket or for
  2241. * new xattr block.
  2242. * If the cluster size is the same as the bucket size, one
  2243. * more is needed since we may need to extend the bucket
  2244. * also.
  2245. */
  2246. clusters_add += 1;
  2247. credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
  2248. if (OCFS2_XATTR_BUCKET_SIZE ==
  2249. OCFS2_SB(inode->i_sb)->s_clustersize) {
  2250. credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
  2251. clusters_add += 1;
  2252. }
  2253. } else {
  2254. meta_add += 1;
  2255. credits += OCFS2_XATTR_BLOCK_CREATE_CREDITS;
  2256. }
  2257. out:
  2258. if (clusters_need)
  2259. *clusters_need = clusters_add;
  2260. if (meta_need)
  2261. *meta_need = meta_add;
  2262. if (credits_need)
  2263. *credits_need = credits;
  2264. brelse(bh);
  2265. return ret;
  2266. }
/*
 * Compute and reserve the metadata blocks and data clusters needed
 * for the xattr set described by @xi, filling @ctxt with the
 * resulting allocation contexts and *@credits with the journal
 * credit estimate.
 *
 * @extra_meta: extra metadata blocks the caller wants reserved on top
 * of the computed estimate.  On failure any reserved meta context is
 * released before returning.
 */
static int ocfs2_init_xattr_set_ctxt(struct inode *inode,
				     struct ocfs2_dinode *di,
				     struct ocfs2_xattr_info *xi,
				     struct ocfs2_xattr_search *xis,
				     struct ocfs2_xattr_search *xbs,
				     struct ocfs2_xattr_set_ctxt *ctxt,
				     int extra_meta,
				     int *credits)
{
	int clusters_add, meta_add, ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	memset(ctxt, 0, sizeof(struct ocfs2_xattr_set_ctxt));

	ocfs2_init_dealloc_ctxt(&ctxt->dealloc);

	ret = ocfs2_calc_xattr_set_need(inode, di, xi, xis, xbs,
					&clusters_add, &meta_add, credits);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	meta_add += extra_meta;
	mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, "
	     "credits = %d\n", xi->name, meta_add, clusters_add, *credits);

	if (meta_add) {
		ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add,
							&ctxt->meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (clusters_add) {
		ret = ocfs2_reserve_clusters(osb, clusters_add, &ctxt->data_ac);
		if (ret)
			mlog_errno(ret);
	}
out:
	if (ret) {
		if (ctxt->meta_ac) {
			ocfs2_free_alloc_context(ctxt->meta_ac);
			ctxt->meta_ac = NULL;
		}

		/*
		 * We cannot have an error and a non null ctxt->data_ac.
		 */
	}

	return ret;
}
/*
 * Core of the xattr set path.  Given the search results for the inode
 * body (@xis) and the external block (@xbs), place, replace or remove
 * the xattr described by @xi, preferring in-inode storage and falling
 * back to the external block on -ENOSPC.  Runs entirely inside the
 * caller's transaction (ctxt->handle), extending it with extra
 * credits as follow-up operations become necessary.  On success the
 * inode ctime is updated in the same transaction.
 */
static int __ocfs2_xattr_set_handle(struct inode *inode,
				    struct ocfs2_dinode *di,
				    struct ocfs2_xattr_info *xi,
				    struct ocfs2_xattr_search *xis,
				    struct ocfs2_xattr_search *xbs,
				    struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret = 0, credits, old_found;

	if (!xi->value) {
		/* Remove existing extended attribute */
		if (!xis->not_found)
			ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
		else if (!xbs->not_found)
			ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
	} else {
		/* We always try to set extended attribute into inode first*/
		ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
		if (!ret && !xbs->not_found) {
			/*
			 * If succeed and that extended attribute existing in
			 * external block, then we will remove it.
			 * Removal is expressed as a set with a NULL value;
			 * xis->not_found is temporarily forced so the credit
			 * estimate targets the block copy only.
			 */
			xi->value = NULL;
			xi->value_len = 0;

			old_found = xis->not_found;
			xis->not_found = -ENODATA;
			ret = ocfs2_calc_xattr_set_need(inode,
							di,
							xi,
							xis,
							xbs,
							NULL,
							NULL,
							&credits);
			xis->not_found = old_found;
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			ret = ocfs2_extend_trans(ctxt->handle, credits +
					ctxt->handle->h_buffer_credits);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
			ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
		} else if (ret == -ENOSPC) {
			if (di->i_xattr_loc && !xbs->xattr_bh) {
				/* An xattr block exists but hasn't been
				 * searched yet; search it and extend the
				 * transaction before retrying there. */
				ret = ocfs2_xattr_block_find(inode,
							     xi->name_index,
							     xi->name, xbs);
				if (ret)
					goto out;

				old_found = xis->not_found;
				xis->not_found = -ENODATA;
				ret = ocfs2_calc_xattr_set_need(inode,
								di,
								xi,
								xis,
								xbs,
								NULL,
								NULL,
								&credits);
				xis->not_found = old_found;
				if (ret) {
					mlog_errno(ret);
					goto out;
				}

				ret = ocfs2_extend_trans(ctxt->handle, credits +
					ctxt->handle->h_buffer_credits);
				if (ret) {
					mlog_errno(ret);
					goto out;
				}
			}
			/*
			 * If no space in inode, we will set extended attribute
			 * into external block.
			 */
			ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
			if (ret)
				goto out;
			if (!xis->not_found) {
				/*
				 * If succeed and that extended attribute
				 * existing in inode, we will remove it.
				 */
				xi->value = NULL;
				xi->value_len = 0;
				xbs->not_found = -ENODATA;
				ret = ocfs2_calc_xattr_set_need(inode,
								di,
								xi,
								xis,
								xbs,
								NULL,
								NULL,
								&credits);
				if (ret) {
					mlog_errno(ret);
					goto out;
				}

				ret = ocfs2_extend_trans(ctxt->handle, credits +
						ctxt->handle->h_buffer_credits);
				if (ret) {
					mlog_errno(ret);
					goto out;
				}
				ret = ocfs2_xattr_ibody_set(inode, xi,
							    xis, ctxt);
			}
		}
	}

	if (!ret) {
		/* Update inode ctime. */
		ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
					      xis->inode_bh,
					      OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		inode->i_ctime = CURRENT_TIME;
		di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
		di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
		ocfs2_journal_dirty(ctxt->handle, xis->inode_bh);
	}
out:
	return ret;
}
/*
 * This function is only called during inode creation, to initialize
 * the security/acl xattrs of the new inode.  All transaction credits
 * have been reserved in mknod, so the caller's @handle is used as-is
 * together with the preallocated @meta_ac/@data_ac contexts.
 */
int ocfs2_xattr_set_handle(handle_t *handle,
			   struct inode *inode,
			   struct buffer_head *di_bh,
			   int name_index,
			   const char *name,
			   const void *value,
			   size_t value_len,
			   int flags,
			   struct ocfs2_alloc_context *meta_ac,
			   struct ocfs2_alloc_context *data_ac)
{
	struct ocfs2_dinode *di;
	int ret;

	struct ocfs2_xattr_info xi = {
		.name_index = name_index,
		.name = name,
		.value = value,
		.value_len = value_len,
	};

	struct ocfs2_xattr_search xis = {
		.not_found = -ENODATA,
	};

	struct ocfs2_xattr_search xbs = {
		.not_found = -ENODATA,
	};

	struct ocfs2_xattr_set_ctxt ctxt = {
		.handle = handle,
		.meta_ac = meta_ac,
		.data_ac = data_ac,
	};

	if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
		return -EOPNOTSUPP;

	/*
	 * In extreme situation, may need xattr bucket when
	 * block size is too small. And we have already reserved
	 * the credits for bucket in mknod.
	 */
	if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) {
		xbs.bucket = ocfs2_xattr_bucket_new(inode);
		if (!xbs.bucket) {
			mlog_errno(-ENOMEM);
			return -ENOMEM;
		}
	}

	xis.inode_bh = xbs.inode_bh = di_bh;
	di = (struct ocfs2_dinode *)di_bh->b_data;

	down_write(&OCFS2_I(inode)->ip_xattr_sem);

	/* Search the inode body first, then the external block. */
	ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis);
	if (ret)
		goto cleanup;
	if (xis.not_found) {
		ret = ocfs2_xattr_block_find(inode, name_index, name, &xbs);
		if (ret)
			goto cleanup;
	}

	ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);

cleanup:
	up_write(&OCFS2_I(inode)->ip_xattr_sem);
	brelse(xbs.xattr_bh);
	ocfs2_xattr_bucket_free(xbs.bucket);

	return ret;
}
/*
 * ocfs2_xattr_set()
 *
 * Set, replace or remove an extended attribute for this inode.
 * value is NULL to remove an existing extended attribute, else either
 * create or replace an extended attribute.  Takes the inode cluster
 * lock, reserves allocators and runs the set inside its own
 * transaction.
 */
int ocfs2_xattr_set(struct inode *inode,
		    int name_index,
		    const char *name,
		    const void *value,
		    size_t value_len,
		    int flags)
{
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;
	int ret, credits, ref_meta = 0, ref_credits = 0;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *tl_inode = osb->osb_tl_inode;
	struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, };
	struct ocfs2_refcount_tree *ref_tree = NULL;

	struct ocfs2_xattr_info xi = {
		.name_index = name_index,
		.name = name,
		.value = value,
		.value_len = value_len,
	};

	struct ocfs2_xattr_search xis = {
		.not_found = -ENODATA,
	};

	struct ocfs2_xattr_search xbs = {
		.not_found = -ENODATA,
	};

	if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb)))
		return -EOPNOTSUPP;

	/*
	 * Only xbs will be used on indexed trees. xis doesn't need a
	 * bucket.
	 */
	xbs.bucket = ocfs2_xattr_bucket_new(inode);
	if (!xbs.bucket) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto cleanup_nolock;
	}
	xis.inode_bh = xbs.inode_bh = di_bh;
	di = (struct ocfs2_dinode *)di_bh->b_data;

	down_write(&OCFS2_I(inode)->ip_xattr_sem);
	/*
	 * Scan inode and external block to find the same name
	 * extended attribute and collect search information.
	 */
	ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis);
	if (ret)
		goto cleanup;
	if (xis.not_found) {
		ret = ocfs2_xattr_block_find(inode, name_index, name, &xbs);
		if (ret)
			goto cleanup;
	}

	/* Enforce XATTR_REPLACE / XATTR_CREATE semantics. */
	if (xis.not_found && xbs.not_found) {
		ret = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		ret = 0;
		if (!value)
			goto cleanup;
	} else {
		ret = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
	}

	/* Check whether the value is refcounted and do some preparation. */
	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL &&
	    (!xis.not_found || !xbs.not_found)) {
		ret = ocfs2_prepare_refcount_xattr(inode, di, &xi,
						   &xis, &xbs, &ref_tree,
						   &ref_meta, &ref_credits);
		if (ret) {
			mlog_errno(ret);
			goto cleanup;
		}
	}

	/* Flush the truncate log, if needed, before reserving space. */
	mutex_lock(&tl_inode->i_mutex);

	if (ocfs2_truncate_log_needs_flush(osb)) {
		ret = __ocfs2_flush_truncate_log(osb);
		if (ret < 0) {
			mutex_unlock(&tl_inode->i_mutex);
			mlog_errno(ret);
			goto cleanup;
		}
	}
	mutex_unlock(&tl_inode->i_mutex);

	ret = ocfs2_init_xattr_set_ctxt(inode, di, &xi, &xis,
					&xbs, &ctxt, ref_meta, &credits);
	if (ret) {
		mlog_errno(ret);
		goto cleanup;
	}

	/* we need to update inode's ctime field, so add credit for it. */
	credits += OCFS2_INODE_UPDATE_CREDITS;
	ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
	if (IS_ERR(ctxt.handle)) {
		ret = PTR_ERR(ctxt.handle);
		mlog_errno(ret);
		goto cleanup;
	}

	ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);

	ocfs2_commit_trans(osb, ctxt.handle);

	if (ctxt.data_ac)
		ocfs2_free_alloc_context(ctxt.data_ac);
	if (ctxt.meta_ac)
		ocfs2_free_alloc_context(ctxt.meta_ac);
	if (ocfs2_dealloc_has_cluster(&ctxt.dealloc))
		ocfs2_schedule_truncate_log_flush(osb, 1);
	ocfs2_run_deallocs(osb, &ctxt.dealloc);

cleanup:
	if (ref_tree)
		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	up_write(&OCFS2_I(inode)->ip_xattr_sem);
	if (!value && !ret) {
		/* Successful removal: try to drop a now-unused refcount
		 * tree while still holding the inode lock. */
		ret = ocfs2_try_remove_refcount_tree(inode, di_bh);
		if (ret)
			mlog_errno(ret);
	}
	ocfs2_inode_unlock(inode, 1);

cleanup_nolock:
	brelse(di_bh);
	brelse(xbs.xattr_bh);
	ocfs2_xattr_bucket_free(xbs.bucket);

	return ret;
}
/*
 * Find the xattr extent rec which may contain name_hash.
 * e_cpos will be the first name hash of the xattr rec.
 * el must be the ocfs2_xattr_header.xb_attrs.xb_root.xt_list.
 */
static int ocfs2_xattr_get_rec(struct inode *inode,
			       u32 name_hash,
			       u64 *p_blkno,
			       u32 *e_cpos,
			       u32 *num_clusters,
			       struct ocfs2_extent_list *el)
{
	int ret = 0, i;
	struct buffer_head *eb_bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_rec *rec = NULL;
	u64 e_blkno = 0;

	if (el->l_tree_depth) {
		/* Descend to the leaf extent block covering name_hash. */
		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, name_hash,
				      &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		if (el->l_tree_depth) {
			ocfs2_error(inode->i_sb,
				    "Inode %lu has non zero tree depth in "
				    "xattr tree block %llu\n", inode->i_ino,
				    (unsigned long long)eb_bh->b_blocknr);
			ret = -EROFS;
			goto out;
		}
	}

	/* Records are ordered by e_cpos (first hash); scan backwards for
	 * the last record whose start hash is <= name_hash. */
	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
		rec = &el->l_recs[i];

		if (le32_to_cpu(rec->e_cpos) <= name_hash) {
			e_blkno = le64_to_cpu(rec->e_blkno);
			break;
		}
	}

	/* NOTE(review): if l_next_free_rec is 0 the loop never runs and
	 * rec stays NULL, so the ocfs2_error() below would dereference
	 * it; callers appear to check for a non-empty list first —
	 * confirm before reusing this helper elsewhere. */
	if (!e_blkno) {
		ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
			    "record (%u, %u, 0) in xattr", inode->i_ino,
			    le32_to_cpu(rec->e_cpos),
			    ocfs2_rec_clusters(el, rec));
		ret = -EROFS;
		goto out;
	}

	*p_blkno = le64_to_cpu(rec->e_blkno);
	*num_clusters = le16_to_cpu(rec->e_leaf_clusters);
	if (e_cpos)
		*e_cpos = le32_to_cpu(rec->e_cpos);
out:
	brelse(eb_bh);
	return ret;
}
/*
 * Per-bucket callback invoked by ocfs2_iterate_xattr_buckets(); a
 * nonzero return value stops the iteration.
 */
typedef int (xattr_bucket_func)(struct inode *inode,
				struct ocfs2_xattr_bucket *bucket,
				void *para);
  2708. static int ocfs2_find_xe_in_bucket(struct inode *inode,
  2709. struct ocfs2_xattr_bucket *bucket,
  2710. int name_index,
  2711. const char *name,
  2712. u32 name_hash,
  2713. u16 *xe_index,
  2714. int *found)
  2715. {
  2716. int i, ret = 0, cmp = 1, block_off, new_offset;
  2717. struct ocfs2_xattr_header *xh = bucket_xh(bucket);
  2718. size_t name_len = strlen(name);
  2719. struct ocfs2_xattr_entry *xe = NULL;
  2720. char *xe_name;
  2721. /*
  2722. * We don't use binary search in the bucket because there
  2723. * may be multiple entries with the same name hash.
  2724. */
  2725. for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
  2726. xe = &xh->xh_entries[i];
  2727. if (name_hash > le32_to_cpu(xe->xe_name_hash))
  2728. continue;
  2729. else if (name_hash < le32_to_cpu(xe->xe_name_hash))
  2730. break;
  2731. cmp = name_index - ocfs2_xattr_get_type(xe);
  2732. if (!cmp)
  2733. cmp = name_len - xe->xe_name_len;
  2734. if (cmp)
  2735. continue;
  2736. ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
  2737. xh,
  2738. i,
  2739. &block_off,
  2740. &new_offset);
  2741. if (ret) {
  2742. mlog_errno(ret);
  2743. break;
  2744. }
  2745. xe_name = bucket_block(bucket, block_off) + new_offset;
  2746. if (!memcmp(name, xe_name, name_len)) {
  2747. *xe_index = i;
  2748. *found = 1;
  2749. ret = 0;
  2750. break;
  2751. }
  2752. }
  2753. return ret;
  2754. }
/*
 * Find the specified xattr entry in a series of buckets.
 * This series starts from p_blkno and lasts for num_clusters.
 * The ocfs2_xattr_header.xh_num_buckets of the first bucket contains
 * the num of the valid buckets.
 *
 * Return the buffer_head this xattr should reside in. And if the xattr's
 * hash is in the gap of 2 buckets, return the lower bucket.
 */
static int ocfs2_xattr_bucket_find(struct inode *inode,
				   int name_index,
				   const char *name,
				   u32 name_hash,
				   u64 p_blkno,
				   u32 first_hash,
				   u32 num_clusters,
				   struct ocfs2_xattr_search *xs)
{
	int ret, found = 0;
	struct ocfs2_xattr_header *xh = NULL;
	struct ocfs2_xattr_entry *xe = NULL;
	u16 index = 0;
	u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
	int low_bucket = 0, bucket, high_bucket;
	struct ocfs2_xattr_bucket *search;
	u32 last_hash;
	u64 blkno, lower_blkno = 0;

	search = ocfs2_xattr_bucket_new(inode);
	if (!search) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	/* Read the first bucket to learn how many buckets are valid. */
	ret = ocfs2_read_xattr_bucket(search, p_blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	xh = bucket_xh(search);
	high_bucket = le16_to_cpu(xh->xh_num_buckets) - 1;
	/* Binary search over the buckets by their first entry's hash. */
	while (low_bucket <= high_bucket) {
		ocfs2_xattr_bucket_relse(search);

		bucket = (low_bucket + high_bucket) / 2;
		blkno = p_blkno + bucket * blk_per_bucket;
		ret = ocfs2_read_xattr_bucket(search, blkno);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		xh = bucket_xh(search);
		xe = &xh->xh_entries[0];
		if (name_hash < le32_to_cpu(xe->xe_name_hash)) {
			high_bucket = bucket - 1;
			continue;
		}

		/*
		 * Check whether the hash of the last entry in our
		 * bucket is larger than the search one. for an empty
		 * bucket, the last one is also the first one.
		 */
		if (xh->xh_count)
			xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];

		last_hash = le32_to_cpu(xe->xe_name_hash);

		/* record lower_blkno which may be the insert place. */
		lower_blkno = blkno;

		if (name_hash > le32_to_cpu(xe->xe_name_hash)) {
			low_bucket = bucket + 1;
			continue;
		}

		/* the searched xattr should reside in this bucket if exists. */
		ret = ocfs2_find_xe_in_bucket(inode, search,
					      name_index, name, name_hash,
					      &index, &found);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		break;
	}

	/*
	 * Record the bucket we have found.
	 * When the xattr's hash value is in the gap of 2 buckets, we will
	 * always set it to the previous bucket.
	 */
	if (!lower_blkno)
		lower_blkno = p_blkno;

	/* This should be in cache - we just read it during the search */
	ret = ocfs2_read_xattr_bucket(xs->bucket, lower_blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	xs->header = bucket_xh(xs->bucket);
	xs->base = bucket_block(xs->bucket, 0);
	xs->end = xs->base + inode->i_sb->s_blocksize;

	if (found) {
		xs->here = &xs->header->xh_entries[index];
		mlog(0, "find xattr %s in bucket %llu, entry = %u\n", name,
		     (unsigned long long)bucket_blkno(xs->bucket), index);
	} else
		ret = -ENODATA;

out:
	ocfs2_xattr_bucket_free(search);
	return ret;
}
/*
 * Look up @name in an indexed (bucketed) xattr tree rooted at
 * @root_bh, filling @xs with the bucket and entry on success.
 * Returns -ENODATA when the tree is empty or the name is absent.
 */
static int ocfs2_xattr_index_block_find(struct inode *inode,
					struct buffer_head *root_bh,
					int name_index,
					const char *name,
					struct ocfs2_xattr_search *xs)
{
	int ret;
	struct ocfs2_xattr_block *xb =
			(struct ocfs2_xattr_block *)root_bh->b_data;
	struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root;
	struct ocfs2_extent_list *el = &xb_root->xt_list;
	u64 p_blkno = 0;
	u32 first_hash, num_clusters = 0;
	u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name));

	if (le16_to_cpu(el->l_next_free_rec) == 0)
		return -ENODATA;

	mlog(0, "find xattr %s, hash = %u, index = %d in xattr tree\n",
	     name, name_hash, name_index);

	/* Locate the extent record that covers this name hash... */
	ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &first_hash,
				  &num_clusters, el);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	BUG_ON(p_blkno == 0 || num_clusters == 0 || first_hash > name_hash);

	mlog(0, "find xattr extent rec %u clusters from %llu, the first hash "
	     "in the rec is %u\n", num_clusters, (unsigned long long)p_blkno,
	     first_hash);

	/* ...then search its buckets for the actual entry. */
	ret = ocfs2_xattr_bucket_find(inode, name_index, name, name_hash,
				      p_blkno, first_hash, num_clusters, xs);

out:
	return ret;
}
/*
 * Call @func(inode, bucket, @para) on every valid bucket in the
 * extent that starts at @blkno and spans @clusters clusters.  The
 * true bucket count is taken from xh_num_buckets of the first bucket.
 * Iteration stops on the first nonzero return from @func; -ERANGE is
 * propagated without being logged as an error.
 */
static int ocfs2_iterate_xattr_buckets(struct inode *inode,
				       u64 blkno,
				       u32 clusters,
				       xattr_bucket_func *func,
				       void *para)
{
	int i, ret = 0;
	u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb));
	u32 num_buckets = clusters * bpc;
	struct ocfs2_xattr_bucket *bucket;

	bucket = ocfs2_xattr_bucket_new(inode);
	if (!bucket) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	mlog(0, "iterating xattr buckets in %u clusters starting from %llu\n",
	     clusters, (unsigned long long)blkno);

	for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) {
		ret = ocfs2_read_xattr_bucket(bucket, blkno);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		/*
		 * The real bucket num in this series of blocks is stored
		 * in the 1st bucket.
		 */
		if (i == 0)
			num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets);

		mlog(0, "iterating xattr bucket %llu, first hash %u\n",
		     (unsigned long long)blkno,
		     le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash));
		if (func) {
			ret = func(inode, bucket, para);
			if (ret && ret != -ERANGE)
				mlog_errno(ret);
			/* Fall through to bucket_relse() */
		}

		ocfs2_xattr_bucket_relse(bucket);
		if (ret)
			break;
	}

	ocfs2_xattr_bucket_free(bucket);
	return ret;
}
/* Accumulator passed (via para) to ocfs2_list_xattr_bucket() while
 * listing the names stored in an indexed xattr tree. */
struct ocfs2_xattr_tree_list {
	char *buffer;		/* destination for the listed names */
	size_t buffer_size;	/* capacity of buffer */
	size_t result;		/* bytes appended (or required) so far */
};
  2943. static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
  2944. struct ocfs2_xattr_header *xh,
  2945. int index,
  2946. int *block_off,
  2947. int *new_offset)
  2948. {
  2949. u16 name_offset;
  2950. if (index < 0 || index >= le16_to_cpu(xh->xh_count))
  2951. return -EINVAL;
  2952. name_offset = le16_to_cpu(xh->xh_entries[index].xe_name_offset);
  2953. *block_off = name_offset >> sb->s_blocksize_bits;
  2954. *new_offset = name_offset % sb->s_blocksize;
  2955. return 0;
  2956. }
  2957. static int ocfs2_list_xattr_bucket(struct inode *inode,
  2958. struct ocfs2_xattr_bucket *bucket,
  2959. void *para)
  2960. {
  2961. int ret = 0, type;
  2962. struct ocfs2_xattr_tree_list *xl = (struct ocfs2_xattr_tree_list *)para;
  2963. int i, block_off, new_offset;
  2964. const char *prefix, *name;
  2965. for (i = 0 ; i < le16_to_cpu(bucket_xh(bucket)->xh_count); i++) {
  2966. struct ocfs2_xattr_entry *entry = &bucket_xh(bucket)->xh_entries[i];
  2967. type = ocfs2_xattr_get_type(entry);
  2968. prefix = ocfs2_xattr_prefix(type);
  2969. if (prefix) {
  2970. ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
  2971. bucket_xh(bucket),
  2972. i,
  2973. &block_off,
  2974. &new_offset);
  2975. if (ret)
  2976. break;
  2977. name = (const char *)bucket_block(bucket, block_off) +
  2978. new_offset;
  2979. ret = ocfs2_xattr_list_entry(xl->buffer,
  2980. xl->buffer_size,
  2981. &xl->result,
  2982. prefix, name,
  2983. entry->xe_name_len);
  2984. if (ret)
  2985. break;
  2986. }
  2987. }
  2988. return ret;
  2989. }
/*
 * Walk the extent records of an indexed xattr block from the record
 * covering the highest name hash down to hash 0, calling @rec_func on
 * each (p_blkno, e_cpos, num_clusters) record found.
 *
 * The walk starts at UINT_MAX; after visiting a record it jumps to
 * just below that record's starting cpos (e_cpos - 1).  A record with
 * e_cpos == 0 is necessarily the lowest one, ending the walk.
 */
static int ocfs2_iterate_xattr_index_block(struct inode *inode,
					   struct buffer_head *blk_bh,
					   xattr_tree_rec_func *rec_func,
					   void *para)
{
	struct ocfs2_xattr_block *xb =
			(struct ocfs2_xattr_block *)blk_bh->b_data;
	struct ocfs2_extent_list *el = &xb->xb_attrs.xb_root.xt_list;
	int ret = 0;
	u32 name_hash = UINT_MAX, e_cpos = 0, num_clusters = 0;
	u64 p_blkno = 0;

	/* Nothing to do for an empty tree or a missing callback. */
	if (!el->l_next_free_rec || !rec_func)
		return 0;

	while (name_hash > 0) {
		ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno,
					  &e_cpos, &num_clusters, el);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		ret = rec_func(inode, blk_bh, p_blkno, e_cpos,
			       num_clusters, para);
		if (ret) {
			/* -ERANGE is a normal stop signal; don't log it. */
			if (ret != -ERANGE)
				mlog_errno(ret);
			break;
		}

		if (e_cpos == 0)
			break;

		/* Continue with the record just below this one. */
		name_hash = e_cpos - 1;
	}

	return ret;
}
/*
 * xattr_tree_rec_func callback: list every xattr stored in the buckets
 * of one extent record (@blkno, @len clusters).  @para is the
 * ocfs2_xattr_tree_list being filled; @root_bh and @cpos are unused.
 */
static int ocfs2_list_xattr_tree_rec(struct inode *inode,
				     struct buffer_head *root_bh,
				     u64 blkno, u32 cpos, u32 len, void *para)
{
	return ocfs2_iterate_xattr_buckets(inode, blkno, len,
					   ocfs2_list_xattr_bucket, para);
}
  3030. static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
  3031. struct buffer_head *blk_bh,
  3032. char *buffer,
  3033. size_t buffer_size)
  3034. {
  3035. int ret;
  3036. struct ocfs2_xattr_tree_list xl = {
  3037. .buffer = buffer,
  3038. .buffer_size = buffer_size,
  3039. .result = 0,
  3040. };
  3041. ret = ocfs2_iterate_xattr_index_block(inode, blk_bh,
  3042. ocfs2_list_xattr_tree_rec, &xl);
  3043. if (ret) {
  3044. mlog_errno(ret);
  3045. goto out;
  3046. }
  3047. ret = xl.result;
  3048. out:
  3049. return ret;
  3050. }
  3051. static int cmp_xe(const void *a, const void *b)
  3052. {
  3053. const struct ocfs2_xattr_entry *l = a, *r = b;
  3054. u32 l_hash = le32_to_cpu(l->xe_name_hash);
  3055. u32 r_hash = le32_to_cpu(r->xe_name_hash);
  3056. if (l_hash > r_hash)
  3057. return 1;
  3058. if (l_hash < r_hash)
  3059. return -1;
  3060. return 0;
  3061. }
  3062. static void swap_xe(void *a, void *b, int size)
  3063. {
  3064. struct ocfs2_xattr_entry *l = a, *r = b, tmp;
  3065. tmp = *l;
  3066. memcpy(l, r, sizeof(struct ocfs2_xattr_entry));
  3067. memcpy(r, &tmp, sizeof(struct ocfs2_xattr_entry));
  3068. }
/*
 * When the ocfs2_xattr_block is filled up, new bucket will be created
 * and all the xattr entries will be moved to the new bucket.
 * The header goes at the start of the bucket, and the names+values are
 * filled from the end.  This is why *target starts as the last buffer.
 * Note: we need to sort the entries since they are not saved in order
 * in the ocfs2_xattr_block.
 */
static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode,
					   struct buffer_head *xb_bh,
					   struct ocfs2_xattr_bucket *bucket)
{
	int i, blocksize = inode->i_sb->s_blocksize;
	int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
	u16 offset, size, off_change;
	struct ocfs2_xattr_entry *xe;
	struct ocfs2_xattr_block *xb =
			(struct ocfs2_xattr_block *)xb_bh->b_data;
	struct ocfs2_xattr_header *xb_xh = &xb->xb_attrs.xb_header;
	struct ocfs2_xattr_header *xh = bucket_xh(bucket);
	u16 count = le16_to_cpu(xb_xh->xh_count);
	char *src = xb_bh->b_data;
	char *target = bucket_block(bucket, blks - 1);

	mlog(0, "cp xattr from block %llu to bucket %llu\n",
	     (unsigned long long)xb_bh->b_blocknr,
	     (unsigned long long)bucket_blkno(bucket));

	/* Start from a clean slate: zero every block of the bucket. */
	for (i = 0; i < blks; i++)
		memset(bucket_block(bucket, i), 0, blocksize);

	/*
	 * Since the xe_name_offset is based on ocfs2_xattr_header,
	 * there is a offset change corresponding to the change of
	 * ocfs2_xattr_header's position.
	 */
	off_change = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
	/*
	 * NOTE(review): this takes the last entry's name offset as the
	 * low edge of the packed name/value region — presumably entries
	 * are appended while names/values grow downward; confirm against
	 * the xattr-block writers.
	 */
	xe = &xb_xh->xh_entries[count - 1];
	offset = le16_to_cpu(xe->xe_name_offset) + off_change;
	size = blocksize - offset;

	/* copy all the names and values. */
	memcpy(target + offset, src + offset, size);

	/* Init new header now. */
	xh->xh_count = xb_xh->xh_count;
	xh->xh_num_buckets = cpu_to_le16(1);
	xh->xh_name_value_len = cpu_to_le16(size);
	xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE - size);

	/* copy all the entries. */
	target = bucket_block(bucket, 0);
	offset = offsetof(struct ocfs2_xattr_header, xh_entries);
	size = count * sizeof(struct ocfs2_xattr_entry);
	memcpy(target + offset, (char *)xb_xh + offset, size);

	/* Change the xe offset for all the xe because of the move. */
	off_change = OCFS2_XATTR_BUCKET_SIZE - blocksize +
		     offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
	for (i = 0; i < count; i++)
		le16_add_cpu(&xh->xh_entries[i].xe_name_offset, off_change);

	mlog(0, "copy entry: start = %u, size = %u, offset_change = %u\n",
	     offset, size, off_change);

	/* Buckets require entries sorted by name hash. */
	sort(target + offset, count, sizeof(struct ocfs2_xattr_entry),
	     cmp_xe, swap_xe);
}
  3128. /*
  3129. * After we move xattr from block to index btree, we have to
  3130. * update ocfs2_xattr_search to the new xe and base.
  3131. *
  3132. * When the entry is in xattr block, xattr_bh indicates the storage place.
  3133. * While if the entry is in index b-tree, "bucket" indicates the
  3134. * real place of the xattr.
  3135. */
  3136. static void ocfs2_xattr_update_xattr_search(struct inode *inode,
  3137. struct ocfs2_xattr_search *xs,
  3138. struct buffer_head *old_bh)
  3139. {
  3140. char *buf = old_bh->b_data;
  3141. struct ocfs2_xattr_block *old_xb = (struct ocfs2_xattr_block *)buf;
  3142. struct ocfs2_xattr_header *old_xh = &old_xb->xb_attrs.xb_header;
  3143. int i;
  3144. xs->header = bucket_xh(xs->bucket);
  3145. xs->base = bucket_block(xs->bucket, 0);
  3146. xs->end = xs->base + inode->i_sb->s_blocksize;
  3147. if (xs->not_found)
  3148. return;
  3149. i = xs->here - old_xh->xh_entries;
  3150. xs->here = &xs->header->xh_entries[i];
  3151. }
/*
 * Convert an unindexed xattr block into an indexed one: allocate one
 * cluster, copy the block's entries into the first bucket of that
 * cluster, then overwrite the block's header area with a b-tree root
 * whose single extent record points at the new cluster.
 *
 * Runs inside the caller's transaction (ctxt->handle) and holds
 * ip_alloc_sem for write across the allocation and conversion.
 *
 * Returns 0 on success or a negative error code.
 */
static int ocfs2_xattr_create_index_block(struct inode *inode,
					  struct ocfs2_xattr_search *xs,
					  struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret;
	u32 bit_off, len;
	u64 blkno;
	handle_t *handle = ctxt->handle;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct buffer_head *xb_bh = xs->xattr_bh;
	struct ocfs2_xattr_block *xb =
			(struct ocfs2_xattr_block *)xb_bh->b_data;
	struct ocfs2_xattr_tree_root *xr;
	u16 xb_flags = le16_to_cpu(xb->xb_flags);

	mlog(0, "create xattr index block for %llu\n",
	     (unsigned long long)xb_bh->b_blocknr);

	/* Must not already be indexed, and the caller supplies a bucket. */
	BUG_ON(xb_flags & OCFS2_XATTR_INDEXED);
	BUG_ON(!xs->bucket);

	/*
	 * XXX:
	 * We can use this lock for now, and maybe move to a dedicated mutex
	 * if performance becomes a problem later.
	 */
	down_write(&oi->ip_alloc_sem);

	ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), xb_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Claim exactly one cluster for the first bucket. */
	ret = __ocfs2_claim_clusters(osb, handle, ctxt->data_ac,
				     1, 1, &bit_off, &len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * The bucket may spread in many blocks, and
	 * we will only touch the 1st block and the last block
	 * in the whole bucket(one for entry and one for data).
	 */
	blkno = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);

	mlog(0, "allocate 1 cluster from %llu to xattr block\n",
	     (unsigned long long)blkno);

	ret = ocfs2_init_xattr_bucket(xs->bucket, blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
						OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Move the entries over and re-aim the search at the bucket. */
	ocfs2_cp_xattr_block_to_bucket(inode, xb_bh, xs->bucket);
	ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);

	ocfs2_xattr_update_xattr_search(inode, xs, xb_bh);

	/* Change from ocfs2_xattr_header to ocfs2_xattr_tree_root */
	memset(&xb->xb_attrs, 0, inode->i_sb->s_blocksize -
	       offsetof(struct ocfs2_xattr_block, xb_attrs));

	xr = &xb->xb_attrs.xb_root;
	xr->xt_clusters = cpu_to_le32(1);
	xr->xt_last_eb_blk = 0;
	xr->xt_list.l_tree_depth = 0;
	xr->xt_list.l_count = cpu_to_le16(ocfs2_xattr_recs_per_xb(inode->i_sb));
	xr->xt_list.l_next_free_rec = cpu_to_le16(1);

	/* Single record covering the whole (one cluster) tree. */
	xr->xt_list.l_recs[0].e_cpos = 0;
	xr->xt_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
	xr->xt_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);

	xb->xb_flags = cpu_to_le16(xb_flags | OCFS2_XATTR_INDEXED);

	ocfs2_journal_dirty(handle, xb_bh);

out:
	up_write(&oi->ip_alloc_sem);
	return ret;
}
  3229. static int cmp_xe_offset(const void *a, const void *b)
  3230. {
  3231. const struct ocfs2_xattr_entry *l = a, *r = b;
  3232. u32 l_name_offset = le16_to_cpu(l->xe_name_offset);
  3233. u32 r_name_offset = le16_to_cpu(r->xe_name_offset);
  3234. if (l_name_offset < r_name_offset)
  3235. return 1;
  3236. if (l_name_offset > r_name_offset)
  3237. return -1;
  3238. return 0;
  3239. }
  3240. /*
  3241. * defrag a xattr bucket if we find that the bucket has some
  3242. * holes beteen name/value pairs.
  3243. * We will move all the name/value pairs to the end of the bucket
  3244. * so that we can spare some space for insertion.
  3245. */
  3246. static int ocfs2_defrag_xattr_bucket(struct inode *inode,
  3247. handle_t *handle,
  3248. struct ocfs2_xattr_bucket *bucket)
  3249. {
  3250. int ret, i;
  3251. size_t end, offset, len, value_len;
  3252. struct ocfs2_xattr_header *xh;
  3253. char *entries, *buf, *bucket_buf = NULL;
  3254. u64 blkno = bucket_blkno(bucket);
  3255. u16 xh_free_start;
  3256. size_t blocksize = inode->i_sb->s_blocksize;
  3257. struct ocfs2_xattr_entry *xe;
  3258. /*
  3259. * In order to make the operation more efficient and generic,
  3260. * we copy all the blocks into a contiguous memory and do the
  3261. * defragment there, so if anything is error, we will not touch
  3262. * the real block.
  3263. */
  3264. bucket_buf = kmalloc(OCFS2_XATTR_BUCKET_SIZE, GFP_NOFS);
  3265. if (!bucket_buf) {
  3266. ret = -EIO;
  3267. goto out;
  3268. }
  3269. buf = bucket_buf;
  3270. for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize)
  3271. memcpy(buf, bucket_block(bucket, i), blocksize);
  3272. ret = ocfs2_xattr_bucket_journal_access(handle, bucket,
  3273. OCFS2_JOURNAL_ACCESS_WRITE);
  3274. if (ret < 0) {
  3275. mlog_errno(ret);
  3276. goto out;
  3277. }
  3278. xh = (struct ocfs2_xattr_header *)bucket_buf;
  3279. entries = (char *)xh->xh_entries;
  3280. xh_free_start = le16_to_cpu(xh->xh_free_start);
  3281. mlog(0, "adjust xattr bucket in %llu, count = %u, "
  3282. "xh_free_start = %u, xh_name_value_len = %u.\n",
  3283. (unsigned long long)blkno, le16_to_cpu(xh->xh_count),
  3284. xh_free_start, le16_to_cpu(xh->xh_name_value_len));
  3285. /*
  3286. * sort all the entries by their offset.
  3287. * the largest will be the first, so that we can
  3288. * move them to the end one by one.
  3289. */
  3290. sort(entries, le16_to_cpu(xh->xh_count),
  3291. sizeof(struct ocfs2_xattr_entry),
  3292. cmp_xe_offset, swap_xe);
  3293. /* Move all name/values to the end of the bucket. */
  3294. xe = xh->xh_entries;
  3295. end = OCFS2_XATTR_BUCKET_SIZE;
  3296. for (i = 0; i < le16_to_cpu(xh->xh_count); i++, xe++) {
  3297. offset = le16_to_cpu(xe->xe_name_offset);
  3298. if (ocfs2_xattr_is_local(xe))
  3299. value_len = OCFS2_XATTR_SIZE(
  3300. le64_to_cpu(xe->xe_value_size));
  3301. else
  3302. value_len = OCFS2_XATTR_ROOT_SIZE;
  3303. len = OCFS2_XATTR_SIZE(xe->xe_name_len) + value_len;
  3304. /*
  3305. * We must make sure that the name/value pair
  3306. * exist in the same block. So adjust end to
  3307. * the previous block end if needed.
  3308. */
  3309. if (((end - len) / blocksize !=
  3310. (end - 1) / blocksize))
  3311. end = end - end % blocksize;
  3312. if (end > offset + len) {
  3313. memmove(bucket_buf + end - len,
  3314. bucket_buf + offset, len);
  3315. xe->xe_name_offset = cpu_to_le16(end - len);
  3316. }
  3317. mlog_bug_on_msg(end < offset + len, "Defrag check failed for "
  3318. "bucket %llu\n", (unsigned long long)blkno);
  3319. end -= len;
  3320. }
  3321. mlog_bug_on_msg(xh_free_start > end, "Defrag check failed for "
  3322. "bucket %llu\n", (unsigned long long)blkno);
  3323. if (xh_free_start == end)
  3324. goto out;
  3325. memset(bucket_buf + xh_free_start, 0, end - xh_free_start);
  3326. xh->xh_free_start = cpu_to_le16(end);
  3327. /* sort the entries by their name_hash. */
  3328. sort(entries, le16_to_cpu(xh->xh_count),
  3329. sizeof(struct ocfs2_xattr_entry),
  3330. cmp_xe, swap_xe);
  3331. buf = bucket_buf;
  3332. for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize)
  3333. memcpy(bucket_block(bucket, i), buf, blocksize);
  3334. ocfs2_xattr_bucket_journal_dirty(handle, bucket);
  3335. out:
  3336. kfree(bucket_buf);
  3337. return ret;
  3338. }
/*
 * prev_blkno points to the start of an existing extent.  new_blkno
 * points to a newly allocated extent.  Because we know each of our
 * clusters contains more than bucket, we can easily split one cluster
 * at a bucket boundary.  So we take the last cluster of the existing
 * extent and split it down the middle.  We move the last half of the
 * buckets in the last cluster of the existing extent over to the new
 * extent.
 *
 * first_bh is the buffer at prev_blkno so we can update the existing
 * extent's bucket count.  header_bh is the bucket were we were hoping
 * to insert our xattr.  If the bucket move places the target in the new
 * extent, we'll update first_bh and header_bh after modifying the old
 * extent.
 *
 * first_hash will be set as the 1st xe's name_hash in the new extent.
 */
static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode,
					       handle_t *handle,
					       struct ocfs2_xattr_bucket *first,
					       struct ocfs2_xattr_bucket *target,
					       u64 new_blkno,
					       u32 num_clusters,
					       u32 *first_hash)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(sb);
	int num_buckets = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb));
	int to_move = num_buckets / 2;
	u64 src_blkno;
	u64 last_cluster_blkno = bucket_blkno(first) +
		((num_clusters - 1) * ocfs2_clusters_to_blocks(sb, 1));

	/* Only valid when a cluster holds more than one bucket. */
	BUG_ON(le16_to_cpu(bucket_xh(first)->xh_num_buckets) < num_buckets);
	BUG_ON(OCFS2_XATTR_BUCKET_SIZE == OCFS2_SB(sb)->s_clustersize);

	mlog(0, "move half of xattrs in cluster %llu to %llu\n",
	     (unsigned long long)last_cluster_blkno, (unsigned long long)new_blkno);

	/* Move the back half of the last cluster's buckets over. */
	ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first),
				     last_cluster_blkno, new_blkno,
				     to_move, first_hash);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* This is the first bucket that got moved */
	src_blkno = last_cluster_blkno + (to_move * blks_per_bucket);

	/*
	 * If the target bucket was part of the moved buckets, we need to
	 * update first and target.
	 */
	if (bucket_blkno(target) >= src_blkno) {
		/* Find the block for the new target bucket */
		src_blkno = new_blkno +
			(bucket_blkno(target) - src_blkno);

		ocfs2_xattr_bucket_relse(first);
		ocfs2_xattr_bucket_relse(target);

		/*
		 * These shouldn't fail - the buffers are in the
		 * journal from ocfs2_cp_xattr_bucket().
		 */
		ret = ocfs2_read_xattr_bucket(first, new_blkno);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		ret = ocfs2_read_xattr_bucket(target, src_blkno);
		if (ret)
			mlog_errno(ret);
	}

out:
	return ret;
}
  3411. /*
  3412. * Find the suitable pos when we divide a bucket into 2.
  3413. * We have to make sure the xattrs with the same hash value exist
  3414. * in the same bucket.
  3415. *
  3416. * If this ocfs2_xattr_header covers more than one hash value, find a
  3417. * place where the hash value changes. Try to find the most even split.
  3418. * The most common case is that all entries have different hash values,
  3419. * and the first check we make will find a place to split.
  3420. */
  3421. static int ocfs2_xattr_find_divide_pos(struct ocfs2_xattr_header *xh)
  3422. {
  3423. struct ocfs2_xattr_entry *entries = xh->xh_entries;
  3424. int count = le16_to_cpu(xh->xh_count);
  3425. int delta, middle = count / 2;
  3426. /*
  3427. * We start at the middle. Each step gets farther away in both
  3428. * directions. We therefore hit the change in hash value
  3429. * nearest to the middle. Note that this loop does not execute for
  3430. * count < 2.
  3431. */
  3432. for (delta = 0; delta < middle; delta++) {
  3433. /* Let's check delta earlier than middle */
  3434. if (cmp_xe(&entries[middle - delta - 1],
  3435. &entries[middle - delta]))
  3436. return middle - delta;
  3437. /* For even counts, don't walk off the end */
  3438. if ((middle + delta + 1) == count)
  3439. continue;
  3440. /* Now try delta past middle */
  3441. if (cmp_xe(&entries[middle + delta],
  3442. &entries[middle + delta + 1]))
  3443. return middle + delta + 1;
  3444. }
  3445. /* Every entry had the same hash */
  3446. return count;
  3447. }
/*
 * Move some xattrs in old bucket(blk) to new bucket(new_blk).
 * first_hash will record the 1st hash of the new bucket.
 *
 * Normally half of the xattrs will be moved.  But we have to make
 * sure that the xattrs with the same hash value are stored in the
 * same bucket.  If all the xattrs in this bucket have the same hash
 * value, the new bucket will be initialized as an empty one and the
 * first_hash will be initialized as (hash_value+1).
 *
 * @new_bucket_head: non-zero when the new bucket starts a series of
 * buckets (its xh_num_buckets is set to 1 and it is journaled with
 * ACCESS_CREATE rather than ACCESS_WRITE).
 */
static int ocfs2_divide_xattr_bucket(struct inode *inode,
				     handle_t *handle,
				     u64 blk,
				     u64 new_blk,
				     u32 *first_hash,
				     int new_bucket_head)
{
	int ret, i;
	int count, start, len, name_value_len = 0, xe_len, name_offset = 0;
	struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL;
	struct ocfs2_xattr_header *xh;
	struct ocfs2_xattr_entry *xe;
	int blocksize = inode->i_sb->s_blocksize;

	mlog(0, "move some of xattrs from bucket %llu to %llu\n",
	     (unsigned long long)blk, (unsigned long long)new_blk);

	s_bucket = ocfs2_xattr_bucket_new(inode);
	t_bucket = ocfs2_xattr_bucket_new(inode);
	if (!s_bucket || !t_bucket) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_xattr_bucket(s_bucket, blk);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_xattr_bucket_journal_access(handle, s_bucket,
						OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Even if !new_bucket_head, we're overwriting t_bucket.  Thus,
	 * there's no need to read it.
	 */
	ret = ocfs2_init_xattr_bucket(t_bucket, new_blk);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Hey, if we're overwriting t_bucket, what difference does
	 * ACCESS_CREATE vs ACCESS_WRITE make?  See the comment in the
	 * same part of ocfs2_cp_xattr_bucket().
	 */
	ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket,
						new_bucket_head ?
						OCFS2_JOURNAL_ACCESS_CREATE :
						OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	xh = bucket_xh(s_bucket);
	count = le16_to_cpu(xh->xh_count);
	/* Split point: entries [0, start) stay, [start, count) move. */
	start = ocfs2_xattr_find_divide_pos(xh);

	if (start == count) {
		/* All entries share one hash value; nothing can move. */
		xe = &xh->xh_entries[start-1];

		/*
		 * initialized a new empty bucket here.
		 * The hash value is set as one larger than
		 * that of the last entry in the previous bucket.
		 */
		for (i = 0; i < t_bucket->bu_blocks; i++)
			memset(bucket_block(t_bucket, i), 0, blocksize);

		xh = bucket_xh(t_bucket);
		xh->xh_free_start = cpu_to_le16(blocksize);
		xh->xh_entries[0].xe_name_hash = xe->xe_name_hash;
		le32_add_cpu(&xh->xh_entries[0].xe_name_hash, 1);

		goto set_num_buckets;
	}

	/* copy the whole bucket to the new first. */
	ocfs2_xattr_bucket_copy_data(t_bucket, s_bucket);

	/* update the new bucket. */
	xh = bucket_xh(t_bucket);

	/*
	 * Calculate the total name/value len and xh_free_start for
	 * the old bucket first.  (t_bucket is a byte copy of s_bucket
	 * at this point, so its first 'start' entries are the old
	 * bucket's entries.)
	 */
	name_offset = OCFS2_XATTR_BUCKET_SIZE;
	name_value_len = 0;
	for (i = 0; i < start; i++) {
		xe = &xh->xh_entries[i];
		xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
		if (ocfs2_xattr_is_local(xe))
			xe_len +=
			   OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
		else
			xe_len += OCFS2_XATTR_ROOT_SIZE;
		name_value_len += xe_len;
		if (le16_to_cpu(xe->xe_name_offset) < name_offset)
			name_offset = le16_to_cpu(xe->xe_name_offset);
	}

	/*
	 * Now begin the modification to the new bucket.
	 *
	 * In the new bucket, We just move the xattr entry to the beginning
	 * and don't touch the name/value.  So there will be some holes in the
	 * bucket, and they will be removed when ocfs2_defrag_xattr_bucket is
	 * called.
	 */
	xe = &xh->xh_entries[start];
	len = sizeof(struct ocfs2_xattr_entry) * (count - start);
	mlog(0, "mv xattr entry len %d from %d to %d\n", len,
	     (int)((char *)xe - (char *)xh),
	     (int)((char *)xh->xh_entries - (char *)xh));
	memmove((char *)xh->xh_entries, (char *)xe, len);
	/* Clear the now-stale tail of the entry array. */
	xe = &xh->xh_entries[count - start];
	len = sizeof(struct ocfs2_xattr_entry) * start;
	memset((char *)xe, 0, len);

	le16_add_cpu(&xh->xh_count, -start);
	le16_add_cpu(&xh->xh_name_value_len, -name_value_len);

	/* Calculate xh_free_start for the new bucket. */
	xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
	for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
		xe = &xh->xh_entries[i];
		xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
		if (ocfs2_xattr_is_local(xe))
			xe_len +=
			   OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
		else
			xe_len += OCFS2_XATTR_ROOT_SIZE;
		if (le16_to_cpu(xe->xe_name_offset) <
		    le16_to_cpu(xh->xh_free_start))
			xh->xh_free_start = xe->xe_name_offset;
	}

set_num_buckets:
	/* set xh->xh_num_buckets for the new xh. */
	if (new_bucket_head)
		xh->xh_num_buckets = cpu_to_le16(1);
	else
		xh->xh_num_buckets = 0;

	ocfs2_xattr_bucket_journal_dirty(handle, t_bucket);

	/* store the first_hash of the new bucket. */
	if (first_hash)
		*first_hash = le32_to_cpu(xh->xh_entries[0].xe_name_hash);

	/*
	 * Now only update the 1st block of the old bucket.  If we
	 * just added a new empty bucket, there is no need to modify
	 * it.
	 */
	if (start == count)
		goto out;

	xh = bucket_xh(s_bucket);
	memset(&xh->xh_entries[start], 0,
	       sizeof(struct ocfs2_xattr_entry) * (count - start));
	xh->xh_count = cpu_to_le16(start);
	xh->xh_free_start = cpu_to_le16(name_offset);
	xh->xh_name_value_len = cpu_to_le16(name_value_len);

	ocfs2_xattr_bucket_journal_dirty(handle, s_bucket);

out:
	ocfs2_xattr_bucket_free(s_bucket);
	ocfs2_xattr_bucket_free(t_bucket);

	return ret;
}
/*
 * Copy xattr from one bucket to another bucket.
 *
 * The caller must make sure that the journal transaction
 * has enough space for journaling.
 *
 * @t_is_new selects ACCESS_CREATE vs ACCESS_WRITE for the target
 * bucket's journal access; see the comment below for why both cases
 * can occur even though the target is always overwritten.
 */
static int ocfs2_cp_xattr_bucket(struct inode *inode,
				 handle_t *handle,
				 u64 s_blkno,
				 u64 t_blkno,
				 int t_is_new)
{
	int ret;
	struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL;

	BUG_ON(s_blkno == t_blkno);

	mlog(0, "cp bucket %llu to %llu, target is %d\n",
	     (unsigned long long)s_blkno, (unsigned long long)t_blkno,
	     t_is_new);

	s_bucket = ocfs2_xattr_bucket_new(inode);
	t_bucket = ocfs2_xattr_bucket_new(inode);
	if (!s_bucket || !t_bucket) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_xattr_bucket(s_bucket, s_blkno);
	if (ret)
		goto out;

	/*
	 * Even if !t_is_new, we're overwriting t_bucket.  Thus,
	 * there's no need to read it.
	 */
	ret = ocfs2_init_xattr_bucket(t_bucket, t_blkno);
	if (ret)
		goto out;

	/*
	 * Hey, if we're overwriting t_bucket, what difference does
	 * ACCESS_CREATE vs ACCESS_WRITE make?  Well, if we allocated a new
	 * cluster to fill, we came here from
	 * ocfs2_mv_xattr_buckets(), and it is really new -
	 * ACCESS_CREATE is required.  But we also might have moved data
	 * out of t_bucket before extending back into it.
	 * ocfs2_add_new_xattr_bucket() can do this - its call to
	 * ocfs2_add_new_xattr_cluster() may have created a new extent
	 * and copied out the end of the old extent.  Then it re-extends
	 * the old extent back to create space for new xattrs.  That's
	 * how we get here, and the bucket isn't really new.
	 */
	ret = ocfs2_xattr_bucket_journal_access(handle, t_bucket,
						t_is_new ?
						OCFS2_JOURNAL_ACCESS_CREATE :
						OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret)
		goto out;

	ocfs2_xattr_bucket_copy_data(t_bucket, s_bucket);
	ocfs2_xattr_bucket_journal_dirty(handle, t_bucket);

out:
	ocfs2_xattr_bucket_free(t_bucket);
	ocfs2_xattr_bucket_free(s_bucket);

	return ret;
}
/*
 * src_blk points to the start of an existing extent.  last_blk points to
 * last cluster in that extent.  to_blk points to a newly allocated
 * extent.  We copy the buckets from the cluster at last_blk to the new
 * extent.  If start_bucket is non-zero, we skip that many buckets before
 * we start copying.  The new extent's xh_num_buckets gets set to the
 * number of buckets we copied.  The old extent's xh_num_buckets shrinks
 * by the same amount.
 *
 * On success, *first_hash (if non-NULL) receives the name hash of the
 * first entry in the new extent's first bucket.
 */
static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
				  u64 src_blk, u64 last_blk, u64 to_blk,
				  unsigned int start_bucket,
				  u32 *first_hash)
{
	int i, ret, credits;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int blks_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
	int num_buckets = ocfs2_xattr_buckets_per_cluster(osb);
	struct ocfs2_xattr_bucket *old_first, *new_first;

	mlog(0, "mv xattrs from cluster %llu to %llu\n",
	     (unsigned long long)last_blk, (unsigned long long)to_blk);

	BUG_ON(start_bucket >= num_buckets);
	if (start_bucket) {
		/* Skip the leading buckets we were told to leave behind. */
		num_buckets -= start_bucket;
		last_blk += (start_bucket * blks_per_bucket);
	}

	/* The first bucket of the original extent */
	old_first = ocfs2_xattr_bucket_new(inode);
	/* The first bucket of the new extent */
	new_first = ocfs2_xattr_bucket_new(inode);
	if (!old_first || !new_first) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_xattr_bucket(old_first, src_blk);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * We need to update the first bucket of the old extent and all
	 * the buckets going to the new extent.
	 */
	credits = ((num_buckets + 1) * blks_per_bucket) +
		  handle->h_buffer_credits;
	ret = ocfs2_extend_trans(handle, credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_xattr_bucket_journal_access(handle, old_first,
						OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Copy each bucket of the tail cluster into the new extent. */
	for (i = 0; i < num_buckets; i++) {
		ret = ocfs2_cp_xattr_bucket(inode, handle,
					    last_blk + (i * blks_per_bucket),
					    to_blk + (i * blks_per_bucket),
					    1);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Get the new bucket ready before we dirty anything
	 * (This actually shouldn't fail, because we already dirtied
	 * it once in ocfs2_cp_xattr_bucket()).
	 */
	ret = ocfs2_read_xattr_bucket(new_first, to_blk);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}
	ret = ocfs2_xattr_bucket_journal_access(handle, new_first,
						OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Now update the headers */
	le16_add_cpu(&bucket_xh(old_first)->xh_num_buckets, -num_buckets);
	ocfs2_xattr_bucket_journal_dirty(handle, old_first);

	bucket_xh(new_first)->xh_num_buckets = cpu_to_le16(num_buckets);
	ocfs2_xattr_bucket_journal_dirty(handle, new_first);

	if (first_hash)
		*first_hash = le32_to_cpu(bucket_xh(new_first)->xh_entries[0].xe_name_hash);

out:
	ocfs2_xattr_bucket_free(new_first);
	ocfs2_xattr_bucket_free(old_first);
	return ret;
}
  3771. /*
  3772. * Move some xattrs in this cluster to the new cluster.
  3773. * This function should only be called when bucket size == cluster size.
  3774. * Otherwise ocfs2_mv_xattr_bucket_cross_cluster should be used instead.
  3775. */
  3776. static int ocfs2_divide_xattr_cluster(struct inode *inode,
  3777. handle_t *handle,
  3778. u64 prev_blk,
  3779. u64 new_blk,
  3780. u32 *first_hash)
  3781. {
  3782. u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
  3783. int ret, credits = 2 * blk_per_bucket + handle->h_buffer_credits;
  3784. BUG_ON(OCFS2_XATTR_BUCKET_SIZE < OCFS2_SB(inode->i_sb)->s_clustersize);
  3785. ret = ocfs2_extend_trans(handle, credits);
  3786. if (ret) {
  3787. mlog_errno(ret);
  3788. return ret;
  3789. }
  3790. /* Move half of the xattr in start_blk to the next bucket. */
  3791. return ocfs2_divide_xattr_bucket(inode, handle, prev_blk,
  3792. new_blk, first_hash, 1);
  3793. }
  3794. /*
  3795. * Move some xattrs from the old cluster to the new one since they are not
  3796. * contiguous in ocfs2 xattr tree.
  3797. *
  3798. * new_blk starts a new separate cluster, and we will move some xattrs from
  3799. * prev_blk to it. v_start will be set as the first name hash value in this
  3800. * new cluster so that it can be used as e_cpos during tree insertion and
  3801. * don't collide with our original b-tree operations. first_bh and header_bh
  3802. * will also be updated since they will be used in ocfs2_extend_xattr_bucket
  3803. * to extend the insert bucket.
  3804. *
  3805. * The problem is how much xattr should we move to the new one and when should
  3806. * we update first_bh and header_bh?
  3807. * 1. If cluster size > bucket size, that means the previous cluster has more
  3808. * than 1 bucket, so just move half nums of bucket into the new cluster and
  3809. * update the first_bh and header_bh if the insert bucket has been moved
  3810. * to the new cluster.
  3811. * 2. If cluster_size == bucket_size:
  3812. * a) If the previous extent rec has more than one cluster and the insert
  3813. * place isn't in the last cluster, copy the entire last cluster to the
 * new one. This time, we don't need to update the first_bh and header_bh
  3815. * since they will not be moved into the new cluster.
  3816. * b) Otherwise, move the bottom half of the xattrs in the last cluster into
  3817. * the new one. And we set the extend flag to zero if the insert place is
  3818. * moved into the new allocated cluster since no extend is needed.
  3819. */
/*
 * Move xattrs from the extent starting at 'first' into the new cluster
 * at new_blk, per the policy described in the comment above.  v_start
 * receives the first name hash of the new cluster; *extend is cleared
 * when the insert bucket itself ends up in the new cluster.
 */
static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode,
					    handle_t *handle,
					    struct ocfs2_xattr_bucket *first,
					    struct ocfs2_xattr_bucket *target,
					    u64 new_blk,
					    u32 prev_clusters,
					    u32 *v_start,
					    int *extend)
{
	int ret;

	mlog(0, "adjust xattrs from cluster %llu len %u to %llu\n",
	     (unsigned long long)bucket_blkno(first), prev_clusters,
	     (unsigned long long)new_blk);

	if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) {
		/* Case 1: multiple buckets per cluster — move whole
		 * buckets across; this may relocate first/target too. */
		ret = ocfs2_mv_xattr_bucket_cross_cluster(inode,
							  handle,
							  first, target,
							  new_blk,
							  prev_clusters,
							  v_start);
		if (ret)
			mlog_errno(ret);
	} else {
		/* The start of the last cluster in the first extent */
		u64 last_blk = bucket_blkno(first) +
			((prev_clusters - 1) *
			 ocfs2_clusters_to_blocks(inode->i_sb, 1));

		if (prev_clusters > 1 && bucket_blkno(target) != last_blk) {
			/* Case 2a: target is not in the last cluster, so
			 * the whole last cluster can be copied verbatim. */
			ret = ocfs2_mv_xattr_buckets(inode, handle,
						     bucket_blkno(first),
						     last_blk, new_blk, 0,
						     v_start);
			if (ret)
				mlog_errno(ret);
		} else {
			/* Case 2b: split the last cluster's xattrs in half. */
			ret = ocfs2_divide_xattr_cluster(inode, handle,
							 last_blk, new_blk,
							 v_start);
			if (ret)
				mlog_errno(ret);

			/* If target was the split bucket, its upper half now
			 * lives in the new cluster — no extend needed. */
			if ((bucket_blkno(target) == last_blk) && extend)
				*extend = 0;
		}
	}

	return ret;
}
  3866. /*
  3867. * Add a new cluster for xattr storage.
  3868. *
  3869. * If the new cluster is contiguous with the previous one, it will be
  3870. * appended to the same extent record, and num_clusters will be updated.
  3871. * If not, we will insert a new extent for it and move some xattrs in
  3872. * the last cluster into the new allocated one.
  3873. * We also need to limit the maximum size of a btree leaf, otherwise we'll
  3874. * lose the benefits of hashing because we'll have to search large leaves.
  3875. * So now the maximum size is OCFS2_MAX_XATTR_TREE_LEAF_SIZE(or clustersize,
  3876. * if it's bigger).
  3877. *
  3878. * first_bh is the first block of the previous extent rec and header_bh
  3879. * indicates the bucket we will insert the new xattrs. They will be updated
  3880. * when the header_bh is moved into the new cluster.
  3881. */
static int ocfs2_add_new_xattr_cluster(struct inode *inode,
				       struct buffer_head *root_bh,
				       struct ocfs2_xattr_bucket *first,
				       struct ocfs2_xattr_bucket *target,
				       u32 *num_clusters,
				       u32 prev_cpos,
				       int *extend,
				       struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret;
	u16 bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
	u32 prev_clusters = *num_clusters;
	u32 clusters_to_add = 1, bit_off, num_bits, v_start = 0;
	u64 block;
	handle_t *handle = ctxt->handle;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_extent_tree et;

	mlog(0, "Add new xattr cluster for %llu, previous xattr hash = %u, "
	     "previous xattr blkno = %llu\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     prev_cpos, (unsigned long long)bucket_blkno(first));

	ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);

	/* The xattr tree root will change (longer rec or a new extent),
	 * so take journal access to it up front. */
	ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto leave;
	}

	/* Claim one cluster from the data allocator reserved in ctxt.
	 * -ENOSPC is an expected outcome, so don't log it as an error. */
	ret = __ocfs2_claim_clusters(osb, handle, ctxt->data_ac, 1,
				     clusters_to_add, &bit_off, &num_bits);
	if (ret < 0) {
		if (ret != -ENOSPC)
			mlog_errno(ret);
		goto leave;
	}

	BUG_ON(num_bits > clusters_to_add);

	block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
	mlog(0, "Allocating %u clusters at block %u for xattr in inode %llu\n",
	     num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno);

	if (bucket_blkno(first) + (prev_clusters * bpc) == block &&
	    (prev_clusters + num_bits) << osb->s_clustersize_bits <=
	     OCFS2_MAX_XATTR_TREE_LEAF_SIZE) {
		/*
		 * If this cluster is contiguous with the old one and
		 * adding this new cluster, we don't surpass the limit of
		 * OCFS2_MAX_XATTR_TREE_LEAF_SIZE, cool. We will let it be
		 * initialized and used like other buckets in the previous
		 * cluster.
		 * So add it as a contiguous one. The caller will handle
		 * its init process.
		 */
		v_start = prev_cpos + prev_clusters;
		*num_clusters = prev_clusters + num_bits;
		mlog(0, "Add contiguous %u clusters to previous extent rec.\n",
		     num_bits);
	} else {
		/* Non-contiguous (or leaf would get too big): start a new
		 * extent and move some xattrs into it.  v_start is set to
		 * the first name hash of the new extent. */
		ret = ocfs2_adjust_xattr_cross_cluster(inode,
						       handle,
						       first,
						       target,
						       block,
						       prev_clusters,
						       &v_start,
						       extend);
		if (ret) {
			mlog_errno(ret);
			goto leave;
		}
	}

	mlog(0, "Insert %u clusters at block %llu for xattr at %u\n",
	     num_bits, (unsigned long long)block, v_start);
	/* Record the new/extended extent in the xattr tree. */
	ret = ocfs2_insert_extent(handle, &et, v_start, block,
				  num_bits, 0, ctxt->meta_ac);
	if (ret < 0) {
		mlog_errno(ret);
		goto leave;
	}

	ret = ocfs2_journal_dirty(handle, root_bh);
	if (ret < 0)
		mlog_errno(ret);

leave:
	return ret;
}
  3965. /*
  3966. * We are given an extent. 'first' is the bucket at the very front of
  3967. * the extent. The extent has space for an additional bucket past
  3968. * bucket_xh(first)->xh_num_buckets. 'target_blkno' is the block number
  3969. * of the target bucket. We wish to shift every bucket past the target
  3970. * down one, filling in that additional space. When we get back to the
  3971. * target, we split the target between itself and the now-empty bucket
  3972. * at target+1 (aka, target_blkno + blks_per_bucket).
  3973. */
  3974. static int ocfs2_extend_xattr_bucket(struct inode *inode,
  3975. handle_t *handle,
  3976. struct ocfs2_xattr_bucket *first,
  3977. u64 target_blk,
  3978. u32 num_clusters)
  3979. {
  3980. int ret, credits;
  3981. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  3982. u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
  3983. u64 end_blk;
  3984. u16 new_bucket = le16_to_cpu(bucket_xh(first)->xh_num_buckets);
  3985. mlog(0, "extend xattr bucket in %llu, xattr extend rec starting "
  3986. "from %llu, len = %u\n", (unsigned long long)target_blk,
  3987. (unsigned long long)bucket_blkno(first), num_clusters);
  3988. /* The extent must have room for an additional bucket */
  3989. BUG_ON(new_bucket >=
  3990. (num_clusters * ocfs2_xattr_buckets_per_cluster(osb)));
  3991. /* end_blk points to the last existing bucket */
  3992. end_blk = bucket_blkno(first) + ((new_bucket - 1) * blk_per_bucket);
  3993. /*
  3994. * end_blk is the start of the last existing bucket.
  3995. * Thus, (end_blk - target_blk) covers the target bucket and
  3996. * every bucket after it up to, but not including, the last
  3997. * existing bucket. Then we add the last existing bucket, the
  3998. * new bucket, and the first bucket (3 * blk_per_bucket).
  3999. */
  4000. credits = (end_blk - target_blk) + (3 * blk_per_bucket) +
  4001. handle->h_buffer_credits;
  4002. ret = ocfs2_extend_trans(handle, credits);
  4003. if (ret) {
  4004. mlog_errno(ret);
  4005. goto out;
  4006. }
  4007. ret = ocfs2_xattr_bucket_journal_access(handle, first,
  4008. OCFS2_JOURNAL_ACCESS_WRITE);
  4009. if (ret) {
  4010. mlog_errno(ret);
  4011. goto out;
  4012. }
  4013. while (end_blk != target_blk) {
  4014. ret = ocfs2_cp_xattr_bucket(inode, handle, end_blk,
  4015. end_blk + blk_per_bucket, 0);
  4016. if (ret)
  4017. goto out;
  4018. end_blk -= blk_per_bucket;
  4019. }
  4020. /* Move half of the xattr in target_blkno to the next bucket. */
  4021. ret = ocfs2_divide_xattr_bucket(inode, handle, target_blk,
  4022. target_blk + blk_per_bucket, NULL, 0);
  4023. le16_add_cpu(&bucket_xh(first)->xh_num_buckets, 1);
  4024. ocfs2_xattr_bucket_journal_dirty(handle, first);
  4025. out:
  4026. return ret;
  4027. }
  4028. /*
  4029. * Add new xattr bucket in an extent record and adjust the buckets
  4030. * accordingly. xb_bh is the ocfs2_xattr_block, and target is the
  4031. * bucket we want to insert into.
  4032. *
  4033. * In the easy case, we will move all the buckets after target down by
  4034. * one. Half of target's xattrs will be moved to the next bucket.
  4035. *
  4036. * If current cluster is full, we'll allocate a new one. This may not
  4037. * be contiguous. The underlying calls will make sure that there is
  4038. * space for the insert, shifting buckets around if necessary.
  4039. * 'target' may be moved by those calls.
  4040. */
static int ocfs2_add_new_xattr_bucket(struct inode *inode,
				      struct buffer_head *xb_bh,
				      struct ocfs2_xattr_bucket *target,
				      struct ocfs2_xattr_set_ctxt *ctxt)
{
	struct ocfs2_xattr_block *xb =
			(struct ocfs2_xattr_block *)xb_bh->b_data;
	struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root;
	struct ocfs2_extent_list *el = &xb_root->xt_list;
	u32 name_hash =
		le32_to_cpu(bucket_xh(target)->xh_entries[0].xe_name_hash);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int ret, num_buckets, extend = 1;
	u64 p_blkno;
	u32 e_cpos, num_clusters;
	/* The bucket at the front of the extent */
	struct ocfs2_xattr_bucket *first;

	mlog(0, "Add new xattr bucket starting from %llu\n",
	     (unsigned long long)bucket_blkno(target));

	/* The first bucket of the original extent */
	first = ocfs2_xattr_bucket_new(inode);
	if (!first) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	/* Look up the extent record that covers target's first hash. */
	ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &e_cpos,
				  &num_clusters, el);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_xattr_bucket(first, p_blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* If the extent is already full of buckets, grow it by a cluster. */
	num_buckets = ocfs2_xattr_buckets_per_cluster(osb) * num_clusters;
	if (num_buckets == le16_to_cpu(bucket_xh(first)->xh_num_buckets)) {
		/*
		 * This can move first+target if the target bucket moves
		 * to the new extent.
		 */
		ret = ocfs2_add_new_xattr_cluster(inode,
						  xb_bh,
						  first,
						  target,
						  &num_clusters,
						  e_cpos,
						  &extend,
						  ctxt);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* extend is cleared above when the target was already split into
	 * the new cluster; otherwise open a slot at target's position. */
	if (extend) {
		ret = ocfs2_extend_xattr_bucket(inode,
						ctxt->handle,
						first,
						bucket_blkno(target),
						num_clusters);
		if (ret)
			mlog_errno(ret);
	}

out:
	ocfs2_xattr_bucket_free(first);
	return ret;
}
  4110. static inline char *ocfs2_xattr_bucket_get_val(struct inode *inode,
  4111. struct ocfs2_xattr_bucket *bucket,
  4112. int offs)
  4113. {
  4114. int block_off = offs >> inode->i_sb->s_blocksize_bits;
  4115. offs = offs % inode->i_sb->s_blocksize;
  4116. return bucket_block(bucket, block_off) + offs;
  4117. }
  4118. /*
  4119. * Handle the normal xattr set, including replace, delete and new.
  4120. *
 * Note: "local" indicates the real data's locality, so we can't judge
 * whether the data is stored inside the bucket just by its length.
  4123. */
static void ocfs2_xattr_set_entry_normal(struct inode *inode,
					 struct ocfs2_xattr_info *xi,
					 struct ocfs2_xattr_search *xs,
					 u32 name_hash,
					 int local)
{
	struct ocfs2_xattr_entry *last, *xe;
	int name_len = strlen(xi->name);
	struct ocfs2_xattr_header *xh = xs->header;
	u16 count = le16_to_cpu(xh->xh_count), start;
	size_t blocksize = inode->i_sb->s_blocksize;
	char *val;
	size_t offs, size, new_size;

	/* 'last' is one past the final in-use entry. */
	last = &xh->xh_entries[count];
	if (!xs->not_found) {
		/* Replace or delete an existing entry (xs->here). */
		xe = xs->here;
		offs = le16_to_cpu(xe->xe_name_offset);
		/* Size of the old name+value region being given back. */
		if (ocfs2_xattr_is_local(xe))
			size = OCFS2_XATTR_SIZE(name_len) +
			OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
		else
			size = OCFS2_XATTR_SIZE(name_len) +
			OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE);

		/*
		 * If the new value will be stored outside, xi->value has been
		 * initialized as an empty ocfs2_xattr_value_root, and the same
		 * goes with xi->value_len, so we can set new_size safely here.
		 * See ocfs2_xattr_set_in_bucket.
		 */
		new_size = OCFS2_XATTR_SIZE(name_len) +
			   OCFS2_XATTR_SIZE(xi->value_len);

		le16_add_cpu(&xh->xh_name_value_len, -size);
		if (xi->value) {
			/* Growing replacements fall through to a fresh
			 * insert at xh_free_start. */
			if (new_size > size)
				goto set_new_name_value;

			/* Now replace the old value with new one. */
			if (local)
				xe->xe_value_size = cpu_to_le64(xi->value_len);
			else
				xe->xe_value_size = 0;

			val = ocfs2_xattr_bucket_get_val(inode,
							 xs->bucket, offs);
			/* Zero the old value bytes, then copy in the new. */
			memset(val + OCFS2_XATTR_SIZE(name_len), 0,
			       size - OCFS2_XATTR_SIZE(name_len));
			if (OCFS2_XATTR_SIZE(xi->value_len) > 0)
				memcpy(val + OCFS2_XATTR_SIZE(name_len),
				       xi->value, xi->value_len);

			le16_add_cpu(&xh->xh_name_value_len, new_size);
			ocfs2_xattr_set_local(xe, local);
			return;
		} else {
			/*
			 * Remove the old entry if there is more than one.
			 * We don't remove the last entry so that we can
			 * use it to indicate the hash value of the empty
			 * bucket.
			 */
			last -= 1;
			le16_add_cpu(&xh->xh_count, -1);
			if (xh->xh_count) {
				memmove(xe, xe + 1,
					(void *)last - (void *)xe);
				memset(last, 0,
				       sizeof(struct ocfs2_xattr_entry));
			} else
				xh->xh_free_start =
					cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);

			return;
		}
	} else {
		/* find a new entry for insert. */
		/* Binary search keeps xh_entries sorted by name hash. */
		int low = 0, high = count - 1, tmp;
		struct ocfs2_xattr_entry *tmp_xe;

		while (low <= high && count) {
			tmp = (low + high) / 2;
			tmp_xe = &xh->xh_entries[tmp];

			if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash))
				low = tmp + 1;
			else if (name_hash <
				 le32_to_cpu(tmp_xe->xe_name_hash))
				high = tmp - 1;
			else {
				low = tmp;
				break;
			}
		}

		/* Open a slot at index 'low' by shifting later entries up. */
		xe = &xh->xh_entries[low];
		if (low != count)
			memmove(xe + 1, xe, (void *)last - (void *)xe);

		le16_add_cpu(&xh->xh_count, 1);
		memset(xe, 0, sizeof(struct ocfs2_xattr_entry));
		xe->xe_name_hash = cpu_to_le32(name_hash);
		xe->xe_name_len = name_len;
		ocfs2_xattr_set_type(xe, xi->name_index);
	}

set_new_name_value:
	/* Insert the new name+value. */
	size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(xi->value_len);

	/*
	 * We must make sure that the name/value pair
	 * exists in the same block.
	 */
	offs = le16_to_cpu(xh->xh_free_start);
	start = offs - size;

	/* If the pair would straddle a block boundary, pull xh_free_start
	 * back to the start of its block so the pair fits in one block. */
	if (start >> inode->i_sb->s_blocksize_bits !=
	    (offs - 1) >> inode->i_sb->s_blocksize_bits) {
		offs = offs - offs % blocksize;
		xh->xh_free_start = cpu_to_le16(offs);
	}

	val = ocfs2_xattr_bucket_get_val(inode, xs->bucket, offs - size);
	xe->xe_name_offset = cpu_to_le16(offs - size);

	memset(val, 0, size);
	memcpy(val, xi->name, name_len);
	memcpy(val + OCFS2_XATTR_SIZE(name_len), xi->value, xi->value_len);

	xe->xe_value_size = cpu_to_le64(xi->value_len);
	ocfs2_xattr_set_local(xe, local);
	xs->here = xe;
	le16_add_cpu(&xh->xh_free_start, -size);
	le16_add_cpu(&xh->xh_name_value_len, size);

	return;
}
  4245. /*
  4246. * Set the xattr entry in the specified bucket.
  4247. * The bucket is indicated by xs->bucket and it should have the enough
  4248. * space for the xattr insertion.
  4249. */
  4250. static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode,
  4251. handle_t *handle,
  4252. struct ocfs2_xattr_info *xi,
  4253. struct ocfs2_xattr_search *xs,
  4254. u32 name_hash,
  4255. int local)
  4256. {
  4257. int ret;
  4258. u64 blkno;
  4259. mlog(0, "Set xattr entry len = %lu index = %d in bucket %llu\n",
  4260. (unsigned long)xi->value_len, xi->name_index,
  4261. (unsigned long long)bucket_blkno(xs->bucket));
  4262. if (!xs->bucket->bu_bhs[1]) {
  4263. blkno = bucket_blkno(xs->bucket);
  4264. ocfs2_xattr_bucket_relse(xs->bucket);
  4265. ret = ocfs2_read_xattr_bucket(xs->bucket, blkno);
  4266. if (ret) {
  4267. mlog_errno(ret);
  4268. goto out;
  4269. }
  4270. }
  4271. ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
  4272. OCFS2_JOURNAL_ACCESS_WRITE);
  4273. if (ret < 0) {
  4274. mlog_errno(ret);
  4275. goto out;
  4276. }
  4277. ocfs2_xattr_set_entry_normal(inode, xi, xs, name_hash, local);
  4278. ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
  4279. out:
  4280. return ret;
  4281. }
  4282. /*
  4283. * Truncate the specified xe_off entry in xattr bucket.
  4284. * bucket is indicated by header_bh and len is the new length.
  4285. * Both the ocfs2_xattr_value_root and the entry will be updated here.
  4286. *
  4287. * Copy the new updated xe and xe_value_root to new_xe and new_xv if needed.
  4288. */
static int ocfs2_xattr_bucket_value_truncate(struct inode *inode,
					     struct ocfs2_xattr_bucket *bucket,
					     int xe_off,
					     int len,
					     struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret, offset;
	u64 value_blk;
	struct ocfs2_xattr_entry *xe;
	struct ocfs2_xattr_header *xh = bucket_xh(bucket);
	size_t blocksize = inode->i_sb->s_blocksize;
	struct ocfs2_xattr_value_buf vb = {
		.vb_access = ocfs2_journal_access,
	};

	xe = &xh->xh_entries[xe_off];

	/* Only tree-stored (non-local) values can be truncated here. */
	BUG_ON(!xe || ocfs2_xattr_is_local(xe));

	/* The value root sits right after the (padded) name in the bucket. */
	offset = le16_to_cpu(xe->xe_name_offset) +
		 OCFS2_XATTR_SIZE(xe->xe_name_len);

	value_blk = offset / blocksize;

	/* We don't allow ocfs2_xattr_value to be stored in different block. */
	BUG_ON(value_blk != (offset + OCFS2_XATTR_ROOT_SIZE - 1) / blocksize);

	vb.vb_bh = bucket->bu_bhs[value_blk];
	BUG_ON(!vb.vb_bh);

	vb.vb_xv = (struct ocfs2_xattr_value_root *)
		(vb.vb_bh->b_data + offset % blocksize);

	/*
	 * From here on out we have to dirty the bucket.  The generic
	 * value calls only modify one of the bucket's bhs, but we need
	 * to send the bucket at once. So if they error, they *could* have
	 * modified something. We have to assume they did, and dirty
	 * the whole bucket. This leaves us in a consistent state.
	 */
	mlog(0, "truncate %u in xattr bucket %llu to %d bytes.\n",
	     xe_off, (unsigned long long)bucket_blkno(bucket), len);
	ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket,
						OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Record the new length in the entry and push the whole bucket. */
	xe->xe_value_size = cpu_to_le64(len);

	ocfs2_xattr_bucket_journal_dirty(ctxt->handle, bucket);

out:
	return ret;
}
  4339. static int ocfs2_xattr_bucket_value_truncate_xs(struct inode *inode,
  4340. struct ocfs2_xattr_search *xs,
  4341. int len,
  4342. struct ocfs2_xattr_set_ctxt *ctxt)
  4343. {
  4344. int ret, offset;
  4345. struct ocfs2_xattr_entry *xe = xs->here;
  4346. struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)xs->base;
  4347. BUG_ON(!xs->bucket->bu_bhs[0] || !xe || ocfs2_xattr_is_local(xe));
  4348. offset = xe - xh->xh_entries;
  4349. ret = ocfs2_xattr_bucket_value_truncate(inode, xs->bucket,
  4350. offset, len, ctxt);
  4351. if (ret)
  4352. mlog_errno(ret);
  4353. return ret;
  4354. }
/*
 * Write value_len bytes from val into the outside (tree-stored) value
 * of the entry at xs->here.  The value root is located inside the
 * bucket, right after the entry's padded name.
 */
static int ocfs2_xattr_bucket_set_value_outside(struct inode *inode,
						handle_t *handle,
						struct ocfs2_xattr_search *xs,
						char *val,
						int value_len)
{
	int ret, offset, block_off;
	struct ocfs2_xattr_value_root *xv;
	struct ocfs2_xattr_entry *xe = xs->here;
	struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket);
	void *base;
	struct ocfs2_xattr_value_buf vb = {
		.vb_access = ocfs2_journal_access,
	};

	/* Only valid for a found entry whose value lives outside. */
	BUG_ON(!xs->base || !xe || ocfs2_xattr_is_local(xe));

	/* Find which bucket block holds the name/value pair, and where. */
	ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, xh,
						xe - xh->xh_entries,
						&block_off,
						&offset);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	base = bucket_block(xs->bucket, block_off);
	/* The value root follows the padded name. */
	xv = (struct ocfs2_xattr_value_root *)(base + offset +
		 OCFS2_XATTR_SIZE(xe->xe_name_len));

	vb.vb_xv = xv;
	vb.vb_bh = xs->bucket->bu_bhs[block_off];
	ret = __ocfs2_xattr_set_value_outside(inode, handle,
					      &vb, val, value_len);
	if (ret)
		mlog_errno(ret);
out:
	return ret;
}
  4390. static int ocfs2_rm_xattr_cluster(struct inode *inode,
  4391. struct buffer_head *root_bh,
  4392. u64 blkno,
  4393. u32 cpos,
  4394. u32 len,
  4395. void *para)
  4396. {
  4397. int ret;
  4398. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  4399. struct inode *tl_inode = osb->osb_tl_inode;
  4400. handle_t *handle;
  4401. struct ocfs2_xattr_block *xb =
  4402. (struct ocfs2_xattr_block *)root_bh->b_data;
  4403. struct ocfs2_alloc_context *meta_ac = NULL;
  4404. struct ocfs2_cached_dealloc_ctxt dealloc;
  4405. struct ocfs2_extent_tree et;
  4406. ret = ocfs2_iterate_xattr_buckets(inode, blkno, len,
  4407. ocfs2_delete_xattr_in_bucket, para);
  4408. if (ret) {
  4409. mlog_errno(ret);
  4410. return ret;
  4411. }
  4412. ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);
  4413. ocfs2_init_dealloc_ctxt(&dealloc);
  4414. mlog(0, "rm xattr extent rec at %u len = %u, start from %llu\n",
  4415. cpos, len, (unsigned long long)blkno);
  4416. ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), blkno,
  4417. len);
  4418. ret = ocfs2_lock_allocators(inode, &et, 0, 1, NULL, &meta_ac);
  4419. if (ret) {
  4420. mlog_errno(ret);
  4421. return ret;
  4422. }
  4423. mutex_lock(&tl_inode->i_mutex);
  4424. if (ocfs2_truncate_log_needs_flush(osb)) {
  4425. ret = __ocfs2_flush_truncate_log(osb);
  4426. if (ret < 0) {
  4427. mlog_errno(ret);
  4428. goto out;
  4429. }
  4430. }
  4431. handle = ocfs2_start_trans(osb, ocfs2_remove_extent_credits(osb->sb));
  4432. if (IS_ERR(handle)) {
  4433. ret = -ENOMEM;
  4434. mlog_errno(ret);
  4435. goto out;
  4436. }
  4437. ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh,
  4438. OCFS2_JOURNAL_ACCESS_WRITE);
  4439. if (ret) {
  4440. mlog_errno(ret);
  4441. goto out_commit;
  4442. }
  4443. ret = ocfs2_remove_extent(handle, &et, cpos, len, meta_ac,
  4444. &dealloc);
  4445. if (ret) {
  4446. mlog_errno(ret);
  4447. goto out_commit;
  4448. }
  4449. le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, -len);
  4450. ret = ocfs2_journal_dirty(handle, root_bh);
  4451. if (ret) {
  4452. mlog_errno(ret);
  4453. goto out_commit;
  4454. }
  4455. ret = ocfs2_truncate_log_append(osb, handle, blkno, len);
  4456. if (ret)
  4457. mlog_errno(ret);
  4458. out_commit:
  4459. ocfs2_commit_trans(osb, handle);
  4460. out:
  4461. ocfs2_schedule_truncate_log_flush(osb, 1);
  4462. mutex_unlock(&tl_inode->i_mutex);
  4463. if (meta_ac)
  4464. ocfs2_free_alloc_context(meta_ac);
  4465. ocfs2_run_deallocs(osb, &dealloc);
  4466. return ret;
  4467. }
  4468. static void ocfs2_xattr_bucket_remove_xs(struct inode *inode,
  4469. handle_t *handle,
  4470. struct ocfs2_xattr_search *xs)
  4471. {
  4472. struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket);
  4473. struct ocfs2_xattr_entry *last = &xh->xh_entries[
  4474. le16_to_cpu(xh->xh_count) - 1];
  4475. int ret = 0;
  4476. ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
  4477. OCFS2_JOURNAL_ACCESS_WRITE);
  4478. if (ret) {
  4479. mlog_errno(ret);
  4480. return;
  4481. }
  4482. /* Remove the old entry. */
  4483. memmove(xs->here, xs->here + 1,
  4484. (void *)last - (void *)xs->here);
  4485. memset(last, 0, sizeof(struct ocfs2_xattr_entry));
  4486. le16_add_cpu(&xh->xh_count, -1);
  4487. ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
  4488. }
  4489. /*
  4490. * Set the xattr name/value in the bucket specified in xs.
  4491. *
  4492. * As the new value in xi may be stored in the bucket or in an outside cluster,
 * we divide the whole process into the following steps:
  4494. * 1. insert name/value in the bucket(ocfs2_xattr_set_entry_in_bucket)
  4495. * 2. truncate of the outside cluster(ocfs2_xattr_bucket_value_truncate_xs)
  4496. * 3. Set the value to the outside cluster(ocfs2_xattr_bucket_set_value_outside)
  4497. * 4. If the clusters for the new outside value can't be allocated, we need
  4498. * to free the xattr we allocated in set.
  4499. */
static int ocfs2_xattr_set_in_bucket(struct inode *inode,
				     struct ocfs2_xattr_info *xi,
				     struct ocfs2_xattr_search *xs,
				     struct ocfs2_xattr_set_ctxt *ctxt)
{
	int ret, local = 1;
	size_t value_len;
	char *val = (char *)xi->value;
	struct ocfs2_xattr_entry *xe = xs->here;
	u32 name_hash = ocfs2_xattr_name_hash(inode, xi->name,
					      strlen(xi->name));

	if (!xs->not_found && !ocfs2_xattr_is_local(xe)) {
		/*
		 * We need to truncate the xattr storage first.
		 *
		 * If both the old and new value are stored to
		 * outside block, we only need to truncate
		 * the storage and then set the value outside.
		 *
		 * If the new value should be stored within block,
		 * we should free all the outside block first and
		 * the modification to the xattr block will be done
		 * by following steps.
		 */
		if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
			value_len = xi->value_len;
		else
			value_len = 0;

		ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs,
							   value_len,
							   ctxt);
		if (ret)
			goto out;

		/* Old and new both outside: reuse the existing entry and
		 * just rewrite the outside value. */
		if (value_len)
			goto set_value_outside;
	}

	value_len = xi->value_len;
	/* So we have to handle the inside block change now. */
	if (value_len > OCFS2_XATTR_INLINE_SIZE) {
		/*
		 * If the new value will be stored outside of block,
		 * initialize a new empty value root and insert it first.
		 */
		local = 0;
		xi->value = &def_xv;
		xi->value_len = OCFS2_XATTR_ROOT_SIZE;
	}

	/* Step 1: insert/replace the name + (value or value root). */
	ret = ocfs2_xattr_set_entry_in_bucket(inode, ctxt->handle, xi, xs,
					      name_hash, local);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (value_len <= OCFS2_XATTR_INLINE_SIZE)
		goto out;

	/* allocate the space now for the outside block storage. */
	ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs,
						   value_len, ctxt);
	if (ret) {
		mlog_errno(ret);

		if (xs->not_found) {
			/*
			 * We can't allocate enough clusters for outside
			 * storage and we have allocated xattr already,
			 * so need to remove it.
			 */
			ocfs2_xattr_bucket_remove_xs(inode, ctxt->handle, xs);
		}
		goto out;
	}

set_value_outside:
	/* Step 3: write the real data into the outside clusters. */
	ret = ocfs2_xattr_bucket_set_value_outside(inode, ctxt->handle,
						   xs, val, value_len);
out:
	return ret;
}
  4576. /*
  4577. * check whether the xattr bucket is filled up with the same hash value.
  4578. * If we want to insert the xattr with the same hash, return -ENOSPC.
  4579. * If we want to insert a xattr with different hash value, go ahead
  4580. * and ocfs2_divide_xattr_bucket will handle this.
  4581. */
  4582. static int ocfs2_check_xattr_bucket_collision(struct inode *inode,
  4583. struct ocfs2_xattr_bucket *bucket,
  4584. const char *name)
  4585. {
  4586. struct ocfs2_xattr_header *xh = bucket_xh(bucket);
  4587. u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name));
  4588. if (name_hash != le32_to_cpu(xh->xh_entries[0].xe_name_hash))
  4589. return 0;
  4590. if (xh->xh_entries[le16_to_cpu(xh->xh_count) - 1].xe_name_hash ==
  4591. xh->xh_entries[0].xe_name_hash) {
  4592. mlog(ML_ERROR, "Too much hash collision in xattr bucket %llu, "
  4593. "hash = %u\n",
  4594. (unsigned long long)bucket_blkno(bucket),
  4595. le32_to_cpu(xh->xh_entries[0].xe_name_hash));
  4596. return -ENOSPC;
  4597. }
  4598. return 0;
  4599. }
/*
 * Set an xattr inside an indexed (bucketed) xattr tree.
 *
 * Computes how much bucket space the insert/update needs, and if the
 * current bucket cannot hold it, tries in order: defragmenting the
 * bucket, then allocating a new bucket (or clusters) and retrying the
 * whole computation once.  The actual write is done by
 * ocfs2_xattr_set_in_bucket().
 *
 * Returns 0 on success or a negative error code.
 */
static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
					     struct ocfs2_xattr_info *xi,
					     struct ocfs2_xattr_search *xs,
					     struct ocfs2_xattr_set_ctxt *ctxt)
{
	struct ocfs2_xattr_header *xh;
	struct ocfs2_xattr_entry *xe;
	u16 count, header_size, xh_free_start;
	int free, max_free, need, old;
	size_t value_size = 0, name_len = strlen(xi->name);
	size_t blocksize = inode->i_sb->s_blocksize;
	int ret, allocation = 0;	/* allocation: guards against looping more than once */

	mlog_entry("Set xattr %s in xattr index block\n", xi->name);

try_again:
	/* Re-read the header state; after an allocation xs->bucket may point
	 * at a different bucket. */
	xh = xs->header;
	count = le16_to_cpu(xh->xh_count);
	xh_free_start = le16_to_cpu(xh->xh_free_start);
	header_size = sizeof(struct ocfs2_xattr_header) +
			count * sizeof(struct ocfs2_xattr_entry);
	/* Space reclaimable by defragmentation: everything not used by the
	 * entry table or live name/value data, minus the mandatory gap. */
	max_free = OCFS2_XATTR_BUCKET_SIZE - header_size -
		le16_to_cpu(xh->xh_name_value_len) - OCFS2_XATTR_HEADER_GAP;

	mlog_bug_on_msg(header_size > blocksize, "bucket %llu has header size "
			"of %u which exceed block size\n",
			(unsigned long long)bucket_blkno(xs->bucket),
			header_size);

	/* Values too large for inline storage are represented by a fixed-size
	 * value root instead of the raw bytes. */
	if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE)
		value_size = OCFS2_XATTR_ROOT_SIZE;
	else if (xi->value)
		value_size = OCFS2_XATTR_SIZE(xi->value_len);

	if (xs->not_found)
		/* Brand new entry: table slot + name + value. */
		need = sizeof(struct ocfs2_xattr_entry) +
			OCFS2_XATTR_SIZE(name_len) + value_size;
	else {
		need = value_size + OCFS2_XATTR_SIZE(name_len);

		/*
		 * We only replace the old value if the new length is smaller
		 * than the old one. Otherwise we will allocate new space in the
		 * bucket to store it.
		 */
		xe = xs->here;
		if (ocfs2_xattr_is_local(xe))
			old = OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
		else
			old = OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE);

		if (old >= value_size)
			need = 0;	/* new data fits in the old slot */
	}

	/* Contiguous free space between the entry table and the name/value
	 * region. */
	free = xh_free_start - header_size - OCFS2_XATTR_HEADER_GAP;
	/*
	 * We need to make sure the new name/value pair
	 * can exist in the same block.
	 */
	if (xh_free_start % blocksize < need)
		free -= xh_free_start % blocksize;

	mlog(0, "xs->not_found = %d, in xattr bucket %llu: free = %d, "
	     "need = %d, max_free = %d, xh_free_start = %u, xh_name_value_len ="
	     " %u\n", xs->not_found,
	     (unsigned long long)bucket_blkno(xs->bucket),
	     free, need, max_free, le16_to_cpu(xh->xh_free_start),
	     le16_to_cpu(xh->xh_name_value_len));

	/* Not enough room, or the entry table itself is full for an insert. */
	if (free < need ||
	    (xs->not_found &&
	     count == ocfs2_xattr_max_xe_in_bucket(inode->i_sb))) {
		if (need <= max_free &&
		    count < ocfs2_xattr_max_xe_in_bucket(inode->i_sb)) {
			/*
			 * We can create the space by defragment. Since only the
			 * name/value will be moved, the xe shouldn't be changed
			 * in xs.
			 */
			ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle,
							xs->bucket);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/* Recompute free space after defragmentation. */
			xh_free_start = le16_to_cpu(xh->xh_free_start);
			free = xh_free_start - header_size
				- OCFS2_XATTR_HEADER_GAP;
			if (xh_free_start % blocksize < need)
				free -= xh_free_start % blocksize;

			if (free >= need)
				goto xattr_set;

			mlog(0, "Can't get enough space for xattr insert by "
			     "defragment. Need %u bytes, but we have %d, so "
			     "allocate new bucket for it.\n", need, free);
		}

		/*
		 * We have to add new buckets or clusters and one
		 * allocation should leave us enough space for insert.
		 */
		BUG_ON(allocation);

		/*
		 * We do not allow for overlapping ranges between buckets. And
		 * the maximum number of collisions we will allow for then is
		 * one bucket's worth, so check it here whether we need to
		 * add a new bucket for the insert.
		 */
		ret = ocfs2_check_xattr_bucket_collision(inode,
							 xs->bucket,
							 xi->name);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_add_new_xattr_bucket(inode,
						 xs->xattr_bh,
						 xs->bucket,
						 ctxt);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * ocfs2_add_new_xattr_bucket() will have updated
		 * xs->bucket if it moved, but it will not have updated
		 * any of the other search fields. Thus, we drop it and
		 * re-search. Everything should be cached, so it'll be
		 * quick.
		 */
		ocfs2_xattr_bucket_relse(xs->bucket);
		ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh,
						   xi->name_index,
						   xi->name, xs);
		if (ret && ret != -ENODATA)
			goto out;
		/* -ENODATA just means "still not found" after the re-search. */
		xs->not_found = ret;

		allocation = 1;
		goto try_again;
	}

xattr_set:
	ret = ocfs2_xattr_set_in_bucket(inode, xi, xs, ctxt);
out:
	mlog_exit(ret);
	return ret;
}
  4736. static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
  4737. struct ocfs2_xattr_bucket *bucket,
  4738. void *para)
  4739. {
  4740. int ret = 0, ref_credits;
  4741. struct ocfs2_xattr_header *xh = bucket_xh(bucket);
  4742. u16 i;
  4743. struct ocfs2_xattr_entry *xe;
  4744. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  4745. struct ocfs2_xattr_set_ctxt ctxt = {NULL, NULL,};
  4746. int credits = ocfs2_remove_extent_credits(osb->sb) +
  4747. ocfs2_blocks_per_xattr_bucket(inode->i_sb);
  4748. struct ocfs2_xattr_value_root *xv;
  4749. struct ocfs2_rm_xattr_bucket_para *args =
  4750. (struct ocfs2_rm_xattr_bucket_para *)para;
  4751. ocfs2_init_dealloc_ctxt(&ctxt.dealloc);
  4752. for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
  4753. xe = &xh->xh_entries[i];
  4754. if (ocfs2_xattr_is_local(xe))
  4755. continue;
  4756. ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket,
  4757. i, &xv, NULL);
  4758. ret = ocfs2_lock_xattr_remove_allocators(inode, xv,
  4759. args->ref_ci,
  4760. args->ref_root_bh,
  4761. &ctxt.meta_ac,
  4762. &ref_credits);
  4763. ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
  4764. if (IS_ERR(ctxt.handle)) {
  4765. ret = PTR_ERR(ctxt.handle);
  4766. mlog_errno(ret);
  4767. break;
  4768. }
  4769. ret = ocfs2_xattr_bucket_value_truncate(inode, bucket,
  4770. i, 0, &ctxt);
  4771. ocfs2_commit_trans(osb, ctxt.handle);
  4772. if (ctxt.meta_ac) {
  4773. ocfs2_free_alloc_context(ctxt.meta_ac);
  4774. ctxt.meta_ac = NULL;
  4775. }
  4776. if (ret) {
  4777. mlog_errno(ret);
  4778. break;
  4779. }
  4780. }
  4781. if (ctxt.meta_ac)
  4782. ocfs2_free_alloc_context(ctxt.meta_ac);
  4783. ocfs2_schedule_truncate_log_flush(osb, 1);
  4784. ocfs2_run_deallocs(osb, &ctxt.dealloc);
  4785. return ret;
  4786. }
  4787. /*
  4788. * Whenever we modify a xattr value root in the bucket(e.g, CoW
  4789. * or change the extent record flag), we need to recalculate
  4790. * the metaecc for the whole bucket. So it is done here.
  4791. *
  4792. * Note:
  4793. * We have to give the extra credits for the caller.
  4794. */
  4795. static int ocfs2_xattr_bucket_post_refcount(struct inode *inode,
  4796. handle_t *handle,
  4797. void *para)
  4798. {
  4799. int ret;
  4800. struct ocfs2_xattr_bucket *bucket =
  4801. (struct ocfs2_xattr_bucket *)para;
  4802. ret = ocfs2_xattr_bucket_journal_access(handle, bucket,
  4803. OCFS2_JOURNAL_ACCESS_WRITE);
  4804. if (ret) {
  4805. mlog_errno(ret);
  4806. return ret;
  4807. }
  4808. ocfs2_xattr_bucket_journal_dirty(handle, bucket);
  4809. return 0;
  4810. }
/*
 * Special action we need if the xattr value is refcounted.
 *
 * 1. If the xattr is refcounted, lock the tree.
 * 2. CoW the xattr if we are setting the new value and the value
 *    will be stored outside.
 * 3. In other case, decrease_refcount will work for us, so just
 *    lock the refcount tree, calculate the meta and credits is OK.
 *
 * We have to do CoW before ocfs2_init_xattr_set_ctxt since
 * currently CoW is a completed transaction, while this function
 * will also lock the allocators and let us deadlock. So we will
 * CoW the whole xattr value.
 *
 * On success *ref_tree holds the locked refcount tree (when the value
 * was refcounted) and *meta_add / *credits are updated for the caller's
 * reservation.
 */
static int ocfs2_prepare_refcount_xattr(struct inode *inode,
					struct ocfs2_dinode *di,
					struct ocfs2_xattr_info *xi,
					struct ocfs2_xattr_search *xis,
					struct ocfs2_xattr_search *xbs,
					struct ocfs2_refcount_tree **ref_tree,
					int *meta_add,
					int *credits)
{
	int ret = 0;
	struct ocfs2_xattr_block *xb;
	struct ocfs2_xattr_entry *xe;
	char *base;
	u32 p_cluster, num_clusters;
	unsigned int ext_flags;
	int name_offset, name_len;
	struct ocfs2_xattr_value_buf vb;
	struct ocfs2_xattr_bucket *bucket = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_post_refcount refcount;
	struct ocfs2_post_refcount *p = NULL;
	struct buffer_head *ref_root_bh = NULL;

	/* Locate the existing entry: either inline in the inode (xis)
	 * or in the xattr block / bucket (xbs). */
	if (!xis->not_found) {
		xe = xis->here;
		name_offset = le16_to_cpu(xe->xe_name_offset);
		name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
		base = xis->base;
		vb.vb_bh = xis->inode_bh;
		vb.vb_access = ocfs2_journal_access_di;
	} else {
		int i, block_off = 0;
		xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
		xe = xbs->here;
		name_offset = le16_to_cpu(xe->xe_name_offset);
		name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
		i = xbs->here - xbs->header->xh_entries;

		if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
			/* Indexed block: the entry lives in a bucket; map the
			 * entry index to the block and offset inside it. */
			ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
							bucket_xh(xbs->bucket),
							i, &block_off,
							&name_offset);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
			base = bucket_block(xbs->bucket, block_off);
			vb.vb_bh = xbs->bucket->bu_bhs[block_off];
			vb.vb_access = ocfs2_journal_access;

			if (ocfs2_meta_ecc(osb)) {
				/* create parameters for ocfs2_post_refcount
				 * so the bucket checksum is refreshed after
				 * any value-root modification. */
				bucket = xbs->bucket;
				refcount.credits = bucket->bu_blocks;
				refcount.para = bucket;
				refcount.func =
					ocfs2_xattr_bucket_post_refcount;
				p = &refcount;
			}
		} else {
			base = xbs->base;
			vb.vb_bh = xbs->xattr_bh;
			vb.vb_access = ocfs2_journal_access_xb;
		}
	}

	/* Inline values are never refcounted; nothing to prepare. */
	if (ocfs2_xattr_is_local(xe))
		goto out;

	vb.vb_xv = (struct ocfs2_xattr_value_root *)
				(base + name_offset + name_len);

	ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster,
				       &num_clusters, &vb.vb_xv->xr_list,
				       &ext_flags);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * We just need to check the 1st extent record, since we always
	 * CoW the whole xattr. So there shouldn't be a xattr with
	 * some REFCOUNT extent recs after the 1st one.
	 */
	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
		goto out;

	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
				       1, ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * If we are deleting the xattr or the new size will be stored inside,
	 * cool, leave it there, the xattr truncate process will remove them
	 * for us(it still needs the refcount tree lock and the meta, credits).
	 * And the worse case is that every cluster truncate will split the
	 * refcount tree, and make the original extent become 3. So we will need
	 * 2 * cluster more extent recs at most.
	 */
	if (!xi->value || xi->value_len <= OCFS2_XATTR_INLINE_SIZE) {

		ret = ocfs2_refcounted_xattr_delete_need(inode,
							 &(*ref_tree)->rf_ci,
							 ref_root_bh, vb.vb_xv,
							 meta_add, credits);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	/* Setting a new outside value: CoW the whole existing value now. */
	ret = ocfs2_refcount_cow_xattr(inode, di, &vb,
				       *ref_tree, ref_root_bh, 0,
				       le32_to_cpu(vb.vb_xv->xr_clusters), p);
	if (ret)
		mlog_errno(ret);

out:
	brelse(ref_root_bh);
	return ret;
}
  4938. /*
  4939. * Add the REFCOUNTED flags for all the extent rec in ocfs2_xattr_value_root.
  4940. * The physical clusters will be added to refcount tree.
  4941. */
  4942. static int ocfs2_xattr_value_attach_refcount(struct inode *inode,
  4943. struct ocfs2_xattr_value_root *xv,
  4944. struct ocfs2_extent_tree *value_et,
  4945. struct ocfs2_caching_info *ref_ci,
  4946. struct buffer_head *ref_root_bh,
  4947. struct ocfs2_cached_dealloc_ctxt *dealloc,
  4948. struct ocfs2_post_refcount *refcount)
  4949. {
  4950. int ret = 0;
  4951. u32 clusters = le32_to_cpu(xv->xr_clusters);
  4952. u32 cpos, p_cluster, num_clusters;
  4953. struct ocfs2_extent_list *el = &xv->xr_list;
  4954. unsigned int ext_flags;
  4955. cpos = 0;
  4956. while (cpos < clusters) {
  4957. ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
  4958. &num_clusters, el, &ext_flags);
  4959. cpos += num_clusters;
  4960. if ((ext_flags & OCFS2_EXT_REFCOUNTED))
  4961. continue;
  4962. BUG_ON(!p_cluster);
  4963. ret = ocfs2_add_refcount_flag(inode, value_et,
  4964. ref_ci, ref_root_bh,
  4965. cpos - num_clusters,
  4966. p_cluster, num_clusters,
  4967. dealloc, refcount);
  4968. if (ret) {
  4969. mlog_errno(ret);
  4970. break;
  4971. }
  4972. }
  4973. return ret;
  4974. }
  4975. /*
  4976. * Given a normal ocfs2_xattr_header, refcount all the entries which
  4977. * have value stored outside.
  4978. * Used for xattrs stored in inode and ocfs2_xattr_block.
  4979. */
  4980. static int ocfs2_xattr_attach_refcount_normal(struct inode *inode,
  4981. struct ocfs2_xattr_value_buf *vb,
  4982. struct ocfs2_xattr_header *header,
  4983. struct ocfs2_caching_info *ref_ci,
  4984. struct buffer_head *ref_root_bh,
  4985. struct ocfs2_cached_dealloc_ctxt *dealloc)
  4986. {
  4987. struct ocfs2_xattr_entry *xe;
  4988. struct ocfs2_xattr_value_root *xv;
  4989. struct ocfs2_extent_tree et;
  4990. int i, ret = 0;
  4991. for (i = 0; i < le16_to_cpu(header->xh_count); i++) {
  4992. xe = &header->xh_entries[i];
  4993. if (ocfs2_xattr_is_local(xe))
  4994. continue;
  4995. xv = (struct ocfs2_xattr_value_root *)((void *)header +
  4996. le16_to_cpu(xe->xe_name_offset) +
  4997. OCFS2_XATTR_SIZE(xe->xe_name_len));
  4998. vb->vb_xv = xv;
  4999. ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
  5000. ret = ocfs2_xattr_value_attach_refcount(inode, xv, &et,
  5001. ref_ci, ref_root_bh,
  5002. dealloc, NULL);
  5003. if (ret) {
  5004. mlog_errno(ret);
  5005. break;
  5006. }
  5007. }
  5008. return ret;
  5009. }
  5010. static int ocfs2_xattr_inline_attach_refcount(struct inode *inode,
  5011. struct buffer_head *fe_bh,
  5012. struct ocfs2_caching_info *ref_ci,
  5013. struct buffer_head *ref_root_bh,
  5014. struct ocfs2_cached_dealloc_ctxt *dealloc)
  5015. {
  5016. struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
  5017. struct ocfs2_xattr_header *header = (struct ocfs2_xattr_header *)
  5018. (fe_bh->b_data + inode->i_sb->s_blocksize -
  5019. le16_to_cpu(di->i_xattr_inline_size));
  5020. struct ocfs2_xattr_value_buf vb = {
  5021. .vb_bh = fe_bh,
  5022. .vb_access = ocfs2_journal_access_di,
  5023. };
  5024. return ocfs2_xattr_attach_refcount_normal(inode, &vb, header,
  5025. ref_ci, ref_root_bh, dealloc);
  5026. }
/*
 * Parameter bundle passed through ocfs2_iterate_xattr_buckets() to
 * ocfs2_xattr_bucket_value_refcount().
 */
struct ocfs2_xattr_tree_value_refcount_para {
	struct ocfs2_caching_info *ref_ci;	/* refcount tree cache */
	struct buffer_head *ref_root_bh;	/* refcount tree root block */
	struct ocfs2_cached_dealloc_ctxt *dealloc; /* deferred frees */
};
  5032. static int ocfs2_get_xattr_tree_value_root(struct super_block *sb,
  5033. struct ocfs2_xattr_bucket *bucket,
  5034. int offset,
  5035. struct ocfs2_xattr_value_root **xv,
  5036. struct buffer_head **bh)
  5037. {
  5038. int ret, block_off, name_offset;
  5039. struct ocfs2_xattr_header *xh = bucket_xh(bucket);
  5040. struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset];
  5041. void *base;
  5042. ret = ocfs2_xattr_bucket_get_name_value(sb,
  5043. bucket_xh(bucket),
  5044. offset,
  5045. &block_off,
  5046. &name_offset);
  5047. if (ret) {
  5048. mlog_errno(ret);
  5049. goto out;
  5050. }
  5051. base = bucket_block(bucket, block_off);
  5052. *xv = (struct ocfs2_xattr_value_root *)(base + name_offset +
  5053. OCFS2_XATTR_SIZE(xe->xe_name_len));
  5054. if (bh)
  5055. *bh = bucket->bu_bhs[block_off];
  5056. out:
  5057. return ret;
  5058. }
/*
 * For a given xattr bucket, refcount all the entries which
 * have value stored outside.
 *
 * Bucket-iteration callback; @para is a
 * struct ocfs2_xattr_tree_value_refcount_para.
 */
static int ocfs2_xattr_bucket_value_refcount(struct inode *inode,
					     struct ocfs2_xattr_bucket *bucket,
					     void *para)
{
	int i, ret = 0;
	struct ocfs2_extent_tree et;
	struct ocfs2_xattr_tree_value_refcount_para *ref =
			(struct ocfs2_xattr_tree_value_refcount_para *)para;
	struct ocfs2_xattr_header *xh =
			(struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;
	struct ocfs2_xattr_entry *xe;
	struct ocfs2_xattr_value_buf vb = {
		.vb_access = ocfs2_journal_access,
	};
	/* Hook to recompute the bucket metaecc after value-root changes. */
	struct ocfs2_post_refcount refcount = {
		.credits = bucket->bu_blocks,
		.para = bucket,
		.func = ocfs2_xattr_bucket_post_refcount,
	};
	struct ocfs2_post_refcount *p = NULL;

	/* We only need post_refcount if we support metaecc. */
	if (ocfs2_meta_ecc(OCFS2_SB(inode->i_sb)))
		p = &refcount;

	mlog(0, "refcount bucket %llu, count = %u\n",
	     (unsigned long long)bucket_blkno(bucket),
	     le16_to_cpu(xh->xh_count));

	for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
		xe = &xh->xh_entries[i];

		/* Inline values have no external clusters. */
		if (ocfs2_xattr_is_local(xe))
			continue;

		ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket, i,
						      &vb.vb_xv, &vb.vb_bh);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		ocfs2_init_xattr_value_extent_tree(&et,
						   INODE_CACHE(inode), &vb);

		ret = ocfs2_xattr_value_attach_refcount(inode, vb.vb_xv,
							&et, ref->ref_ci,
							ref->ref_root_bh,
							ref->dealloc, p);
		if (ret) {
			mlog_errno(ret);
			break;
		}
	}

	return ret;
}
  5112. static int ocfs2_refcount_xattr_tree_rec(struct inode *inode,
  5113. struct buffer_head *root_bh,
  5114. u64 blkno, u32 cpos, u32 len, void *para)
  5115. {
  5116. return ocfs2_iterate_xattr_buckets(inode, blkno, len,
  5117. ocfs2_xattr_bucket_value_refcount,
  5118. para);
  5119. }
  5120. static int ocfs2_xattr_block_attach_refcount(struct inode *inode,
  5121. struct buffer_head *blk_bh,
  5122. struct ocfs2_caching_info *ref_ci,
  5123. struct buffer_head *ref_root_bh,
  5124. struct ocfs2_cached_dealloc_ctxt *dealloc)
  5125. {
  5126. int ret = 0;
  5127. struct ocfs2_xattr_block *xb =
  5128. (struct ocfs2_xattr_block *)blk_bh->b_data;
  5129. if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
  5130. struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header;
  5131. struct ocfs2_xattr_value_buf vb = {
  5132. .vb_bh = blk_bh,
  5133. .vb_access = ocfs2_journal_access_xb,
  5134. };
  5135. ret = ocfs2_xattr_attach_refcount_normal(inode, &vb, header,
  5136. ref_ci, ref_root_bh,
  5137. dealloc);
  5138. } else {
  5139. struct ocfs2_xattr_tree_value_refcount_para para = {
  5140. .ref_ci = ref_ci,
  5141. .ref_root_bh = ref_root_bh,
  5142. .dealloc = dealloc,
  5143. };
  5144. ret = ocfs2_iterate_xattr_index_block(inode, blk_bh,
  5145. ocfs2_refcount_xattr_tree_rec,
  5146. &para);
  5147. }
  5148. return ret;
  5149. }
  5150. int ocfs2_xattr_attach_refcount_tree(struct inode *inode,
  5151. struct buffer_head *fe_bh,
  5152. struct ocfs2_caching_info *ref_ci,
  5153. struct buffer_head *ref_root_bh,
  5154. struct ocfs2_cached_dealloc_ctxt *dealloc)
  5155. {
  5156. int ret = 0;
  5157. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  5158. struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
  5159. struct buffer_head *blk_bh = NULL;
  5160. if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
  5161. ret = ocfs2_xattr_inline_attach_refcount(inode, fe_bh,
  5162. ref_ci, ref_root_bh,
  5163. dealloc);
  5164. if (ret) {
  5165. mlog_errno(ret);
  5166. goto out;
  5167. }
  5168. }
  5169. if (!di->i_xattr_loc)
  5170. goto out;
  5171. ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
  5172. &blk_bh);
  5173. if (ret < 0) {
  5174. mlog_errno(ret);
  5175. goto out;
  5176. }
  5177. ret = ocfs2_xattr_block_attach_refcount(inode, blk_bh, ref_ci,
  5178. ref_root_bh, dealloc);
  5179. if (ret)
  5180. mlog_errno(ret);
  5181. brelse(blk_bh);
  5182. out:
  5183. return ret;
  5184. }
/* Predicate deciding whether a given entry should be reflinked. */
typedef int (should_xattr_reflinked)(struct ocfs2_xattr_entry *xe);

/*
 * Store the information we need in xattr reflink.
 * old_bh and new_bh are inode bh for the old and new inode.
 */
struct ocfs2_xattr_reflink {
	struct inode *old_inode;	/* reflink source inode */
	struct inode *new_inode;	/* reflink target inode */
	struct buffer_head *old_bh;	/* source inode block */
	struct buffer_head *new_bh;	/* target inode block */
	struct ocfs2_caching_info *ref_ci;	/* refcount tree cache */
	struct buffer_head *ref_root_bh;	/* refcount tree root */
	struct ocfs2_cached_dealloc_ctxt *dealloc; /* deferred frees */
	/* NULL means "reflink everything". */
	should_xattr_reflinked *xattr_reflinked;
};
/*
 * Given a xattr header and xe offset,
 * return the proper xv and the corresponding bh.
 * xattr in inode, block and xattr tree have different implementations,
 * so callers supply the matching lookup function.
 */
typedef int (get_xattr_value_root)(struct super_block *sb,
				   struct buffer_head *bh,
				   struct ocfs2_xattr_header *xh,
				   int offset,
				   struct ocfs2_xattr_value_root **xv,
				   struct buffer_head **ret_bh,
				   void *para);
/*
 * Calculate all the xattr value root metadata stored in this xattr header and
 * credits we need if we create them from the scratch.
 * We use get_xattr_value_root so that all types of xattr container can use it.
 *
 * Accumulates into *metas (extent blocks), *credits (journal credits)
 * and *num_recs (refcount records) — the caller zeroes them.
 */
static int ocfs2_value_metas_in_xattr_header(struct super_block *sb,
					     struct buffer_head *bh,
					     struct ocfs2_xattr_header *xh,
					     int *metas, int *credits,
					     int *num_recs,
					     get_xattr_value_root *func,
					     void *para)
{
	int i, ret = 0;
	struct ocfs2_xattr_value_root *xv;
	struct ocfs2_xattr_entry *xe;

	for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
		xe = &xh->xh_entries[i];

		/* Inline values need no extent metadata. */
		if (ocfs2_xattr_is_local(xe))
			continue;

		ret = func(sb, bh, xh, i, &xv, NULL, para);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		/* One extent block per rec at each interior tree level. */
		*metas += le16_to_cpu(xv->xr_list.l_tree_depth) *
			  le16_to_cpu(xv->xr_list.l_next_free_rec);

		*credits += ocfs2_calc_extend_credits(sb,
						      &def_xv.xv.xr_list,
						      le32_to_cpu(xv->xr_clusters));

		/*
		 * If the value is a tree with depth > 1, We don't go deep
		 * to the extent block, so just calculate a maximum record num.
		 */
		if (!xv->xr_list.l_tree_depth)
			*num_recs += le16_to_cpu(xv->xr_list.l_next_free_rec);
		else
			*num_recs += ocfs2_clusters_for_bytes(sb,
							      XATTR_SIZE_MAX);
	}

	return ret;
}
  5254. /* Used by xattr inode and block to return the right xv and buffer_head. */
  5255. static int ocfs2_get_xattr_value_root(struct super_block *sb,
  5256. struct buffer_head *bh,
  5257. struct ocfs2_xattr_header *xh,
  5258. int offset,
  5259. struct ocfs2_xattr_value_root **xv,
  5260. struct buffer_head **ret_bh,
  5261. void *para)
  5262. {
  5263. struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset];
  5264. *xv = (struct ocfs2_xattr_value_root *)((void *)xh +
  5265. le16_to_cpu(xe->xe_name_offset) +
  5266. OCFS2_XATTR_SIZE(xe->xe_name_len));
  5267. if (ret_bh)
  5268. *ret_bh = bh;
  5269. return 0;
  5270. }
/*
 * Lock the meta_ac and calculate how much credits we need for reflink xattrs.
 * It is only used for inline xattr and xattr block.
 */
static int ocfs2_reflink_lock_xattr_allocators(struct ocfs2_super *osb,
					       struct ocfs2_xattr_header *xh,
					       struct buffer_head *ref_root_bh,
					       int *credits,
					       struct ocfs2_alloc_context **meta_ac)
{
	int ret, meta_add = 0, num_recs = 0;
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_root_bh->b_data;

	*credits = 0;

	/* Count extent-block metadata, credits and refcount recs needed
	 * by every outside-stored value in this header. */
	ret = ocfs2_value_metas_in_xattr_header(osb->sb, NULL, xh,
						&meta_add, credits, &num_recs,
						ocfs2_get_xattr_value_root,
						NULL);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * We need to add/modify num_recs in refcount tree, so just calculate
	 * an approximate number we need for refcount tree change.
	 * Sometimes we need to split the tree, and after split, half recs
	 * will be moved to the new block, and a new block can only provide
	 * half number of recs. So we multiple new blocks by 2.
	 */
	num_recs = num_recs / ocfs2_refcount_recs_per_rb(osb->sb) * 2;
	meta_add += num_recs;
	*credits += num_recs + num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
		*credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
			    le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
	else
		*credits += 1;

	ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, meta_ac);
	if (ret)
		mlog_errno(ret);

out:
	return ret;
}
/*
 * Given a xattr header, reflink all the xattrs in this container.
 * It can be used for inode, block and bucket.
 *
 * NOTE:
 * Before we call this function, the caller has memcpy the xattr in
 * old_xh to the new_xh.
 *
 * If args.xattr_reflinked is set, call it to decide whether the xe should
 * be reflinked or not. If not, remove it from the new xattr header.
 */
static int ocfs2_reflink_xattr_header(handle_t *handle,
				      struct ocfs2_xattr_reflink *args,
				      struct buffer_head *old_bh,
				      struct ocfs2_xattr_header *xh,
				      struct buffer_head *new_bh,
				      struct ocfs2_xattr_header *new_xh,
				      struct ocfs2_xattr_value_buf *vb,
				      struct ocfs2_alloc_context *meta_ac,
				      get_xattr_value_root *func,
				      void *para)
{
	int ret = 0, i, j;
	struct super_block *sb = args->old_inode->i_sb;
	struct buffer_head *value_bh;
	struct ocfs2_xattr_entry *xe, *last;
	struct ocfs2_xattr_value_root *xv, *new_xv;
	struct ocfs2_extent_tree data_et;
	u32 clusters, cpos, p_cluster, num_clusters;
	unsigned int ext_flags = 0;

	mlog(0, "reflink xattr in container %llu, count = %u\n",
	     (unsigned long long)old_bh->b_blocknr, le16_to_cpu(xh->xh_count));

	/* One past the last copied entry in the new header. */
	last = &new_xh->xh_entries[le16_to_cpu(new_xh->xh_count)];
	/* i indexes the old header, j the new one; they diverge whenever
	 * an entry is dropped from the copy. */
	for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) {
		xe = &xh->xh_entries[i];

		if (args->xattr_reflinked && !args->xattr_reflinked(xe)) {
			/* Caller vetoed this entry: remove it from the copy
			 * by shifting the remaining entries down. */
			xe = &new_xh->xh_entries[j];

			le16_add_cpu(&new_xh->xh_count, -1);
			if (new_xh->xh_count) {
				memmove(xe, xe + 1,
					(void *)last - (void *)xe);
				memset(last, 0,
				       sizeof(struct ocfs2_xattr_entry));
			}

			/*
			 * We don't want j to increase in the next round since
			 * it is already moved ahead.
			 */
			j--;
			continue;
		}

		/* Inline values were fully copied by the caller's memcpy. */
		if (ocfs2_xattr_is_local(xe))
			continue;

		ret = func(sb, old_bh, xh, i, &xv, NULL, para);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		ret = func(sb, new_bh, new_xh, j, &new_xv, &value_bh, para);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		/*
		 * For the xattr which has l_tree_depth = 0, all the extent
		 * recs have already be copied to the new xh with the
		 * propriate OCFS2_EXT_REFCOUNTED flag we just need to
		 * increase the refount count int the refcount tree.
		 *
		 * For the xattr which has l_tree_depth > 0, we need
		 * to initialize it to the empty default value root,
		 * and then insert the extents one by one.
		 */
		if (xv->xr_list.l_tree_depth) {
			memcpy(new_xv, &def_xv, sizeof(def_xv));
			vb->vb_xv = new_xv;
			vb->vb_bh = value_bh;
			ocfs2_init_xattr_value_extent_tree(&data_et,
					INODE_CACHE(args->new_inode), vb);
		}

		clusters = le32_to_cpu(xv->xr_clusters);
		cpos = 0;
		while (cpos < clusters) {
			ret = ocfs2_xattr_get_clusters(args->old_inode,
						       cpos,
						       &p_cluster,
						       &num_clusters,
						       &xv->xr_list,
						       &ext_flags);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			BUG_ON(!p_cluster);

			if (xv->xr_list.l_tree_depth) {
				/* Deep tree: re-insert the extent into the
				 * freshly initialized value root. */
				ret = ocfs2_insert_extent(handle,
						&data_et, cpos,
						ocfs2_clusters_to_blocks(
							args->old_inode->i_sb,
							p_cluster),
						num_clusters, ext_flags,
						meta_ac);
				if (ret) {
					mlog_errno(ret);
					goto out;
				}
			}

			/* Both files now share these physical clusters. */
			ret = ocfs2_increase_refcount(handle, args->ref_ci,
						      args->ref_root_bh,
						      p_cluster, num_clusters,
						      meta_ac, args->dealloc);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			cpos += num_clusters;
		}
	}

out:
	return ret;
}
/*
 * Reflink the inline xattrs stored in the tail of the old inode block
 * into the new inode block: reserve metadata and credits, copy the
 * inline region, then share/insert the outside-stored values via
 * ocfs2_reflink_xattr_header(), and finally mark the new inode as
 * carrying inline xattrs.
 */
static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args)
{
	int ret = 0, credits = 0;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(args->old_inode->i_sb);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)args->old_bh->b_data;
	int inline_size = le16_to_cpu(di->i_xattr_inline_size);
	/* Inline xattrs live in the last inline_size bytes of the block. */
	int header_off = osb->sb->s_blocksize - inline_size;
	struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)
					(args->old_bh->b_data + header_off);
	struct ocfs2_xattr_header *new_xh = (struct ocfs2_xattr_header *)
					(args->new_bh->b_data + header_off);
	struct ocfs2_alloc_context *meta_ac = NULL;
	struct ocfs2_inode_info *new_oi;
	struct ocfs2_dinode *new_di;
	struct ocfs2_xattr_value_buf vb = {
		.vb_bh = args->new_bh,
		.vb_access = ocfs2_journal_access_di,
	};

	ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
						  &credits, &meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(args->new_inode),
				      args->new_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* Copy the whole inline xattr region; ocfs2_reflink_xattr_header()
	 * then fixes up or drops individual entries. */
	memcpy(args->new_bh->b_data + header_off,
	       args->old_bh->b_data + header_off, inline_size);

	new_di = (struct ocfs2_dinode *)args->new_bh->b_data;
	new_di->i_xattr_inline_size = cpu_to_le16(inline_size);

	ret = ocfs2_reflink_xattr_header(handle, args, args->old_bh, xh,
					 args->new_bh, new_xh, &vb, meta_ac,
					 ocfs2_get_xattr_value_root, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* Publish the inline-xattr feature flags on the new inode. */
	new_oi = OCFS2_I(args->new_inode);
	spin_lock(&new_oi->ip_lock);
	new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL;
	new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
	spin_unlock(&new_oi->ip_lock);

	ocfs2_journal_dirty(handle, args->new_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	return ret;
}
  5496. static int ocfs2_create_empty_xattr_block(struct inode *inode,
  5497. struct buffer_head *fe_bh,
  5498. struct buffer_head **ret_bh,
  5499. int indexed)
  5500. {
  5501. int ret;
  5502. handle_t *handle;
  5503. struct ocfs2_alloc_context *meta_ac;
  5504. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  5505. ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
  5506. if (ret < 0) {
  5507. mlog_errno(ret);
  5508. return ret;
  5509. }
  5510. handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
  5511. if (IS_ERR(handle)) {
  5512. ret = PTR_ERR(handle);
  5513. mlog_errno(ret);
  5514. goto out;
  5515. }
  5516. mlog(0, "create new xattr block for inode %llu, index = %d\n",
  5517. (unsigned long long)fe_bh->b_blocknr, indexed);
  5518. ret = ocfs2_create_xattr_block(handle, inode, fe_bh,
  5519. meta_ac, ret_bh, indexed);
  5520. if (ret)
  5521. mlog_errno(ret);
  5522. ocfs2_commit_trans(osb, handle);
  5523. out:
  5524. ocfs2_free_alloc_context(meta_ac);
  5525. return ret;
  5526. }
/*
 * Reflink the xattrs stored in the old inode's external xattr block
 * (@blk_bh) into the new inode's block (@new_blk_bh): copy the header
 * area wholesale, then let ocfs2_reflink_xattr_header() take refcounts
 * on any externally stored values.  Also sets OCFS2_HAS_XATTR_FL on the
 * new inode if it is not set yet.
 */
static int ocfs2_reflink_xattr_block(struct ocfs2_xattr_reflink *args,
				     struct buffer_head *blk_bh,
				     struct buffer_head *new_blk_bh)
{
	int ret = 0, credits = 0;
	handle_t *handle;
	struct ocfs2_inode_info *new_oi = OCFS2_I(args->new_inode);
	struct ocfs2_dinode *new_di;
	struct ocfs2_super *osb = OCFS2_SB(args->new_inode->i_sb);
	/* The xattr header lives right after the xattr block header. */
	int header_off = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
	struct ocfs2_xattr_block *xb =
			(struct ocfs2_xattr_block *)blk_bh->b_data;
	struct ocfs2_xattr_header *xh = &xb->xb_attrs.xb_header;
	struct ocfs2_xattr_block *new_xb =
			(struct ocfs2_xattr_block *)new_blk_bh->b_data;
	struct ocfs2_xattr_header *new_xh = &new_xb->xb_attrs.xb_header;
	struct ocfs2_alloc_context *meta_ac;
	struct ocfs2_xattr_value_buf vb = {
		.vb_bh = new_blk_bh,
		.vb_access = ocfs2_journal_access_xb,
	};

	/* Size the transaction and reserve metadata for the value trees. */
	ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
						  &credits, &meta_ac);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	/* One more credits in case we need to add xattr flags in new inode. */
	handle = ocfs2_start_trans(osb, credits + 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	/* Only journal the dinode if we will flip OCFS2_HAS_XATTR_FL below. */
	if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) {
		ret = ocfs2_journal_access_di(handle,
					      INODE_CACHE(args->new_inode),
					      args->new_bh,
					      OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret) {
			mlog_errno(ret);
			goto out_commit;
		}
	}

	ret = ocfs2_journal_access_xb(handle, INODE_CACHE(args->new_inode),
				      new_blk_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* Bulk-copy the entry header area; value roots are fixed up next. */
	memcpy(new_blk_bh->b_data + header_off, blk_bh->b_data + header_off,
	       osb->sb->s_blocksize - header_off);

	ret = ocfs2_reflink_xattr_header(handle, args, blk_bh, xh,
					 new_blk_bh, new_xh, &vb, meta_ac,
					 ocfs2_get_xattr_value_root, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ocfs2_journal_dirty(handle, new_blk_bh);

	if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) {
		new_di = (struct ocfs2_dinode *)args->new_bh->b_data;
		/* ip_lock guards ip_dyn_features updates. */
		spin_lock(&new_oi->ip_lock);
		new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL;
		new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
		spin_unlock(&new_oi->ip_lock);

		ocfs2_journal_dirty(handle, args->new_bh);
	}

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	ocfs2_free_alloc_context(meta_ac);
	return ret;
}
/* Context threaded through the bucketed (indexed) xattr tree reflink walk. */
struct ocfs2_reflink_xattr_tree_args {
	struct ocfs2_xattr_reflink *reflink;	/* inode-level reflink args */
	struct buffer_head *old_blk_bh;		/* source xattr block */
	struct buffer_head *new_blk_bh;		/* destination xattr block */
	struct ocfs2_xattr_bucket *old_bucket;	/* bucket being copied from */
	struct ocfs2_xattr_bucket *new_bucket;	/* bucket being copied to */
};
  5608. /*
  5609. * NOTE:
  5610. * We have to handle the case that both old bucket and new bucket
  5611. * will call this function to get the right ret_bh.
  5612. * So The caller must give us the right bh.
  5613. */
  5614. static int ocfs2_get_reflink_xattr_value_root(struct super_block *sb,
  5615. struct buffer_head *bh,
  5616. struct ocfs2_xattr_header *xh,
  5617. int offset,
  5618. struct ocfs2_xattr_value_root **xv,
  5619. struct buffer_head **ret_bh,
  5620. void *para)
  5621. {
  5622. struct ocfs2_reflink_xattr_tree_args *args =
  5623. (struct ocfs2_reflink_xattr_tree_args *)para;
  5624. struct ocfs2_xattr_bucket *bucket;
  5625. if (bh == args->old_bucket->bu_bhs[0])
  5626. bucket = args->old_bucket;
  5627. else
  5628. bucket = args->new_bucket;
  5629. return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
  5630. xv, ret_bh);
  5631. }
/* Running totals accumulated while sizing a reflink's metadata needs. */
struct ocfs2_value_tree_metas {
	int num_metas;	/* metadata blocks to reserve */
	int credits;	/* journal credits to request */
	int num_recs;	/* refcount records that may be added/changed */
};
  5637. static int ocfs2_value_tree_metas_in_bucket(struct super_block *sb,
  5638. struct buffer_head *bh,
  5639. struct ocfs2_xattr_header *xh,
  5640. int offset,
  5641. struct ocfs2_xattr_value_root **xv,
  5642. struct buffer_head **ret_bh,
  5643. void *para)
  5644. {
  5645. struct ocfs2_xattr_bucket *bucket =
  5646. (struct ocfs2_xattr_bucket *)para;
  5647. return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
  5648. xv, ret_bh);
  5649. }
  5650. static int ocfs2_calc_value_tree_metas(struct inode *inode,
  5651. struct ocfs2_xattr_bucket *bucket,
  5652. void *para)
  5653. {
  5654. struct ocfs2_value_tree_metas *metas =
  5655. (struct ocfs2_value_tree_metas *)para;
  5656. struct ocfs2_xattr_header *xh =
  5657. (struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;
  5658. /* Add the credits for this bucket first. */
  5659. metas->credits += bucket->bu_blocks;
  5660. return ocfs2_value_metas_in_xattr_header(inode->i_sb, bucket->bu_bhs[0],
  5661. xh, &metas->num_metas,
  5662. &metas->credits, &metas->num_recs,
  5663. ocfs2_value_tree_metas_in_bucket,
  5664. bucket);
  5665. }
  5666. /*
  5667. * Given a xattr extent rec starting from blkno and having len clusters,
  5668. * iterate all the buckets calculate how much metadata we need for reflinking
  5669. * all the ocfs2_xattr_value_root and lock the allocators accordingly.
  5670. */
  5671. static int ocfs2_lock_reflink_xattr_rec_allocators(
  5672. struct ocfs2_reflink_xattr_tree_args *args,
  5673. struct ocfs2_extent_tree *xt_et,
  5674. u64 blkno, u32 len, int *credits,
  5675. struct ocfs2_alloc_context **meta_ac,
  5676. struct ocfs2_alloc_context **data_ac)
  5677. {
  5678. int ret, num_free_extents;
  5679. struct ocfs2_value_tree_metas metas;
  5680. struct ocfs2_super *osb = OCFS2_SB(args->reflink->old_inode->i_sb);
  5681. struct ocfs2_refcount_block *rb;
  5682. memset(&metas, 0, sizeof(metas));
  5683. ret = ocfs2_iterate_xattr_buckets(args->reflink->old_inode, blkno, len,
  5684. ocfs2_calc_value_tree_metas, &metas);
  5685. if (ret) {
  5686. mlog_errno(ret);
  5687. goto out;
  5688. }
  5689. *credits = metas.credits;
  5690. /*
  5691. * Calculate we need for refcount tree change.
  5692. *
  5693. * We need to add/modify num_recs in refcount tree, so just calculate
  5694. * an approximate number we need for refcount tree change.
  5695. * Sometimes we need to split the tree, and after split, half recs
  5696. * will be moved to the new block, and a new block can only provide
  5697. * half number of recs. So we multiple new blocks by 2.
  5698. * In the end, we have to add credits for modifying the already
  5699. * existed refcount block.
  5700. */
  5701. rb = (struct ocfs2_refcount_block *)args->reflink->ref_root_bh->b_data;
  5702. metas.num_recs =
  5703. (metas.num_recs + ocfs2_refcount_recs_per_rb(osb->sb) - 1) /
  5704. ocfs2_refcount_recs_per_rb(osb->sb) * 2;
  5705. metas.num_metas += metas.num_recs;
  5706. *credits += metas.num_recs +
  5707. metas.num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
  5708. if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
  5709. *credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
  5710. le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
  5711. else
  5712. *credits += 1;
  5713. /* count in the xattr tree change. */
  5714. num_free_extents = ocfs2_num_free_extents(osb, xt_et);
  5715. if (num_free_extents < 0) {
  5716. ret = num_free_extents;
  5717. mlog_errno(ret);
  5718. goto out;
  5719. }
  5720. if (num_free_extents < len)
  5721. metas.num_metas += ocfs2_extend_meta_needed(xt_et->et_root_el);
  5722. *credits += ocfs2_calc_extend_credits(osb->sb,
  5723. xt_et->et_root_el, len);
  5724. if (metas.num_metas) {
  5725. ret = ocfs2_reserve_new_metadata_blocks(osb, metas.num_metas,
  5726. meta_ac);
  5727. if (ret) {
  5728. mlog_errno(ret);
  5729. goto out;
  5730. }
  5731. }
  5732. if (len) {
  5733. ret = ocfs2_reserve_clusters(osb, len, data_ac);
  5734. if (ret)
  5735. mlog_errno(ret);
  5736. }
  5737. out:
  5738. if (ret) {
  5739. if (*meta_ac) {
  5740. ocfs2_free_alloc_context(*meta_ac);
  5741. meta_ac = NULL;
  5742. }
  5743. }
  5744. return ret;
  5745. }
/*
 * Copy the xattr buckets living at @blkno to freshly allocated space at
 * @new_blkno, one bucket at a time, re-refcounting externally stored
 * values through ocfs2_reflink_xattr_header().
 * NOTE(review): @data_ac is accepted but not used in this body; the
 * clusters appear to be claimed by the caller — confirm against callers.
 */
static int ocfs2_reflink_xattr_buckets(handle_t *handle,
				u64 blkno, u64 new_blkno, u32 clusters,
				struct ocfs2_alloc_context *meta_ac,
				struct ocfs2_alloc_context *data_ac,
				struct ocfs2_reflink_xattr_tree_args *args)
{
	int i, j, ret = 0;
	struct super_block *sb = args->reflink->old_inode->i_sb;
	u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb));
	/* Upper bound; trimmed to the real count once the 1st bucket is read. */
	u32 num_buckets = clusters * bpc;
	int bpb = args->old_bucket->bu_blocks;
	struct ocfs2_xattr_value_buf vb = {
		.vb_access = ocfs2_journal_access,
	};

	for (i = 0; i < num_buckets; i++, blkno += bpb, new_blkno += bpb) {
		ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		ret = ocfs2_init_xattr_bucket(args->new_bucket, new_blkno);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		/*
		 * The real bucket num in this series of blocks is stored
		 * in the 1st bucket.
		 */
		if (i == 0)
			num_buckets = le16_to_cpu(
				bucket_xh(args->old_bucket)->xh_num_buckets);

		ret = ocfs2_xattr_bucket_journal_access(handle,
						args->new_bucket,
						OCFS2_JOURNAL_ACCESS_CREATE);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		/* Raw block-for-block copy of the whole bucket. */
		for (j = 0; j < bpb; j++)
			memcpy(bucket_block(args->new_bucket, j),
			       bucket_block(args->old_bucket, j),
			       sb->s_blocksize);

		ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);

		ret = ocfs2_reflink_xattr_header(handle, args->reflink,
						 args->old_bucket->bu_bhs[0],
						 bucket_xh(args->old_bucket),
						 args->new_bucket->bu_bhs[0],
						 bucket_xh(args->new_bucket),
						 &vb, meta_ac,
						 ocfs2_get_reflink_xattr_value_root,
						 args);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		/*
		 * Re-access and dirty the bucket to calculate metaecc.
		 * Because we may extend the transaction in reflink_xattr_header
		 * which will let the already accessed block gone.
		 */
		ret = ocfs2_xattr_bucket_journal_access(handle,
						args->new_bucket,
						OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);

		ocfs2_xattr_bucket_relse(args->old_bucket);
		ocfs2_xattr_bucket_relse(args->new_bucket);
	}

	/* Release once more in case we broke out before the in-loop relse. */
	ocfs2_xattr_bucket_relse(args->old_bucket);
	ocfs2_xattr_bucket_relse(args->new_bucket);
	return ret;
}
/*
 * Create the same xattr extent record in the new inode's xattr tree.
 *
 * Called per extent rec of the old tree: reserve allocators, claim @len
 * new clusters, copy the buckets there, and insert the resulting extent
 * at @cpos into the new inode's xattr tree.
 */
static int ocfs2_reflink_xattr_rec(struct inode *inode,
				   struct buffer_head *root_bh,
				   u64 blkno,
				   u32 cpos,
				   u32 len,
				   void *para)
{
	int ret, credits = 0;
	u32 p_cluster, num_clusters;
	u64 new_blkno;
	handle_t *handle;
	struct ocfs2_reflink_xattr_tree_args *args =
			(struct ocfs2_reflink_xattr_tree_args *)para;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_alloc_context *meta_ac = NULL;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_extent_tree et;

	/* The insert below targets the NEW inode's xattr tree. */
	ocfs2_init_xattr_tree_extent_tree(&et,
					  INODE_CACHE(args->reflink->new_inode),
					  args->new_blk_bh);

	ret = ocfs2_lock_reflink_xattr_rec_allocators(args, &et, blkno,
						      len, &credits,
						      &meta_ac, &data_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	/* Claim the destination clusters for the copied buckets. */
	ret = ocfs2_claim_clusters(osb, handle, data_ac,
				   len, &p_cluster, &num_clusters);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	new_blkno = ocfs2_clusters_to_blocks(osb->sb, p_cluster);

	mlog(0, "reflink xattr buckets %llu to %llu, len %u\n",
	     (unsigned long long)blkno, (unsigned long long)new_blkno, len);
	ret = ocfs2_reflink_xattr_buckets(handle, blkno, new_blkno, len,
					  meta_ac, data_ac, args);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
	     (unsigned long long)new_blkno, len, cpos);
	ret = ocfs2_insert_extent(handle, &et, cpos, new_blkno,
				  len, 0, meta_ac);
	if (ret)
		mlog_errno(ret);

out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	return ret;
}
  5888. /*
  5889. * Create reflinked xattr buckets.
  5890. * We will add bucket one by one, and refcount all the xattrs in the bucket
  5891. * if they are stored outside.
  5892. */
  5893. static int ocfs2_reflink_xattr_tree(struct ocfs2_xattr_reflink *args,
  5894. struct buffer_head *blk_bh,
  5895. struct buffer_head *new_blk_bh)
  5896. {
  5897. int ret;
  5898. struct ocfs2_reflink_xattr_tree_args para;
  5899. memset(&para, 0, sizeof(para));
  5900. para.reflink = args;
  5901. para.old_blk_bh = blk_bh;
  5902. para.new_blk_bh = new_blk_bh;
  5903. para.old_bucket = ocfs2_xattr_bucket_new(args->old_inode);
  5904. if (!para.old_bucket) {
  5905. mlog_errno(-ENOMEM);
  5906. return -ENOMEM;
  5907. }
  5908. para.new_bucket = ocfs2_xattr_bucket_new(args->new_inode);
  5909. if (!para.new_bucket) {
  5910. ret = -ENOMEM;
  5911. mlog_errno(ret);
  5912. goto out;
  5913. }
  5914. ret = ocfs2_iterate_xattr_index_block(args->old_inode, blk_bh,
  5915. ocfs2_reflink_xattr_rec,
  5916. &para);
  5917. if (ret)
  5918. mlog_errno(ret);
  5919. out:
  5920. ocfs2_xattr_bucket_free(para.old_bucket);
  5921. ocfs2_xattr_bucket_free(para.new_bucket);
  5922. return ret;
  5923. }
  5924. static int ocfs2_reflink_xattr_in_block(struct ocfs2_xattr_reflink *args,
  5925. struct buffer_head *blk_bh)
  5926. {
  5927. int ret, indexed = 0;
  5928. struct buffer_head *new_blk_bh = NULL;
  5929. struct ocfs2_xattr_block *xb =
  5930. (struct ocfs2_xattr_block *)blk_bh->b_data;
  5931. if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)
  5932. indexed = 1;
  5933. ret = ocfs2_create_empty_xattr_block(args->new_inode, args->new_bh,
  5934. &new_blk_bh, indexed);
  5935. if (ret) {
  5936. mlog_errno(ret);
  5937. goto out;
  5938. }
  5939. if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED))
  5940. ret = ocfs2_reflink_xattr_block(args, blk_bh, new_blk_bh);
  5941. else
  5942. ret = ocfs2_reflink_xattr_tree(args, blk_bh, new_blk_bh);
  5943. if (ret)
  5944. mlog_errno(ret);
  5945. out:
  5946. brelse(new_blk_bh);
  5947. return ret;
  5948. }
  5949. static int ocfs2_reflink_xattr_no_security(struct ocfs2_xattr_entry *xe)
  5950. {
  5951. int type = ocfs2_xattr_get_type(xe);
  5952. return type != OCFS2_XATTR_INDEX_SECURITY &&
  5953. type != OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS &&
  5954. type != OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT;
  5955. }
/*
 * Reflink every xattr of @old_inode onto @new_inode: first the inline
 * xattrs stored in the dinode, then (if present) the external xattr
 * block or indexed tree.  When @preserve_security is false, security
 * and POSIX ACL xattrs are filtered out via a reflink callback.
 * Runs under the old inode's refcount tree lock.
 */
int ocfs2_reflink_xattrs(struct inode *old_inode,
			 struct buffer_head *old_bh,
			 struct inode *new_inode,
			 struct buffer_head *new_bh,
			 bool preserve_security)
{
	int ret;
	struct ocfs2_xattr_reflink args;
	struct ocfs2_inode_info *oi = OCFS2_I(old_inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)old_bh->b_data;
	struct buffer_head *blk_bh = NULL;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_refcount_tree *ref_tree;
	struct buffer_head *ref_root_bh = NULL;

	ret = ocfs2_lock_refcount_tree(OCFS2_SB(old_inode->i_sb),
				       le64_to_cpu(di->i_refcount_loc),
				       1, &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_init_dealloc_ctxt(&dealloc);

	args.old_inode = old_inode;
	args.new_inode = new_inode;
	args.old_bh = old_bh;
	args.new_bh = new_bh;
	args.ref_ci = &ref_tree->rf_ci;
	args.ref_root_bh = ref_root_bh;
	args.dealloc = &dealloc;
	/* NULL means "reflink everything"; otherwise skip security/ACLs. */
	if (preserve_security)
		args.xattr_reflinked = NULL;
	else
		args.xattr_reflinked = ocfs2_reflink_xattr_no_security;

	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
		ret = ocfs2_reflink_xattr_inline(&args);
		if (ret) {
			mlog_errno(ret);
			goto out_unlock;
		}
	}

	/* No external xattr block: inline copy above was all there is. */
	if (!di->i_xattr_loc)
		goto out_unlock;

	ret = ocfs2_read_xattr_block(old_inode, le64_to_cpu(di->i_xattr_loc),
				     &blk_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_unlock;
	}

	ret = ocfs2_reflink_xattr_in_block(&args, blk_bh);
	if (ret)
		mlog_errno(ret);

	brelse(blk_bh);

out_unlock:
	ocfs2_unlock_refcount_tree(OCFS2_SB(old_inode->i_sb),
				   ref_tree, 1);
	brelse(ref_root_bh);

	/* Flush any clusters queued for deallocation during the reflink. */
	if (ocfs2_dealloc_has_cluster(&dealloc)) {
		ocfs2_schedule_truncate_log_flush(OCFS2_SB(old_inode->i_sb), 1);
		ocfs2_run_deallocs(OCFS2_SB(old_inode->i_sb), &dealloc);
	}

out:
	return ret;
}
  6019. /*
  6020. * Initialize security and acl for a already created inode.
  6021. * Used for reflink a non-preserve-security file.
  6022. *
  6023. * It uses common api like ocfs2_xattr_set, so the caller
  6024. * must not hold any lock expect i_mutex.
  6025. */
  6026. int ocfs2_init_security_and_acl(struct inode *dir,
  6027. struct inode *inode)
  6028. {
  6029. int ret = 0;
  6030. struct buffer_head *dir_bh = NULL;
  6031. struct ocfs2_security_xattr_info si = {
  6032. .enable = 1,
  6033. };
  6034. ret = ocfs2_init_security_get(inode, dir, &si);
  6035. if (!ret) {
  6036. ret = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
  6037. si.name, si.value, si.value_len,
  6038. XATTR_CREATE);
  6039. if (ret) {
  6040. mlog_errno(ret);
  6041. goto leave;
  6042. }
  6043. } else if (ret != -EOPNOTSUPP) {
  6044. mlog_errno(ret);
  6045. goto leave;
  6046. }
  6047. ret = ocfs2_inode_lock(dir, &dir_bh, 0);
  6048. if (ret) {
  6049. mlog_errno(ret);
  6050. goto leave;
  6051. }
  6052. ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
  6053. if (ret)
  6054. mlog_errno(ret);
  6055. ocfs2_inode_unlock(dir, 0);
  6056. brelse(dir_bh);
  6057. leave:
  6058. return ret;
  6059. }
  6060. /*
  6061. * 'security' attributes support
  6062. */
  6063. static size_t ocfs2_xattr_security_list(struct dentry *dentry, char *list,
  6064. size_t list_size, const char *name,
  6065. size_t name_len, int type)
  6066. {
  6067. const size_t prefix_len = XATTR_SECURITY_PREFIX_LEN;
  6068. const size_t total_len = prefix_len + name_len + 1;
  6069. if (list && total_len <= list_size) {
  6070. memcpy(list, XATTR_SECURITY_PREFIX, prefix_len);
  6071. memcpy(list + prefix_len, name, name_len);
  6072. list[prefix_len + name_len] = '\0';
  6073. }
  6074. return total_len;
  6075. }
  6076. static int ocfs2_xattr_security_get(struct dentry *dentry, const char *name,
  6077. void *buffer, size_t size, int type)
  6078. {
  6079. if (strcmp(name, "") == 0)
  6080. return -EINVAL;
  6081. return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_SECURITY,
  6082. name, buffer, size);
  6083. }
  6084. static int ocfs2_xattr_security_set(struct dentry *dentry, const char *name,
  6085. const void *value, size_t size, int flags, int type)
  6086. {
  6087. if (strcmp(name, "") == 0)
  6088. return -EINVAL;
  6089. return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_SECURITY,
  6090. name, value, size, flags);
  6091. }
/*
 * Ask the LSM for the initial security xattr of a new @inode in @dir.
 * On success si->name/si->value/si->value_len are filled in by the LSM;
 * the caller owns and must free name/value.  Returns -EOPNOTSUPP when
 * the volume has no xattr support (or no LSM provides a label).
 */
int ocfs2_init_security_get(struct inode *inode,
			    struct inode *dir,
			    struct ocfs2_security_xattr_info *si)
{
	/* check whether ocfs2 support feature xattr */
	if (!ocfs2_supports_xattr(OCFS2_SB(dir->i_sb)))
		return -EOPNOTSUPP;
	return security_inode_init_security(inode, dir, &si->name, &si->value,
					    &si->value_len);
}
/*
 * Store the LSM-provided security xattr (@si) on @inode within the
 * caller's already-running transaction @handle, using the caller's
 * reserved allocators.
 */
int ocfs2_init_security_set(handle_t *handle,
			    struct inode *inode,
			    struct buffer_head *di_bh,
			    struct ocfs2_security_xattr_info *si,
			    struct ocfs2_alloc_context *xattr_ac,
			    struct ocfs2_alloc_context *data_ac)
{
	return ocfs2_xattr_set_handle(handle, inode, di_bh,
				      OCFS2_XATTR_INDEX_SECURITY,
				      si->name, si->value, si->value_len, 0,
				      xattr_ac, data_ac);
}
/* VFS handler table for the "security." xattr namespace. */
struct xattr_handler ocfs2_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.list	= ocfs2_xattr_security_list,
	.get	= ocfs2_xattr_security_get,
	.set	= ocfs2_xattr_security_set,
};
  6120. /*
  6121. * 'trusted' attributes support
  6122. */
  6123. static size_t ocfs2_xattr_trusted_list(struct dentry *dentry, char *list,
  6124. size_t list_size, const char *name,
  6125. size_t name_len, int type)
  6126. {
  6127. const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN;
  6128. const size_t total_len = prefix_len + name_len + 1;
  6129. if (list && total_len <= list_size) {
  6130. memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
  6131. memcpy(list + prefix_len, name, name_len);
  6132. list[prefix_len + name_len] = '\0';
  6133. }
  6134. return total_len;
  6135. }
  6136. static int ocfs2_xattr_trusted_get(struct dentry *dentry, const char *name,
  6137. void *buffer, size_t size, int type)
  6138. {
  6139. if (strcmp(name, "") == 0)
  6140. return -EINVAL;
  6141. return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_TRUSTED,
  6142. name, buffer, size);
  6143. }
  6144. static int ocfs2_xattr_trusted_set(struct dentry *dentry, const char *name,
  6145. const void *value, size_t size, int flags, int type)
  6146. {
  6147. if (strcmp(name, "") == 0)
  6148. return -EINVAL;
  6149. return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_TRUSTED,
  6150. name, value, size, flags);
  6151. }
/* VFS handler table for the "trusted." xattr namespace. */
struct xattr_handler ocfs2_xattr_trusted_handler = {
	.prefix	= XATTR_TRUSTED_PREFIX,
	.list	= ocfs2_xattr_trusted_list,
	.get	= ocfs2_xattr_trusted_get,
	.set	= ocfs2_xattr_trusted_set,
};
  6158. /*
  6159. * 'user' attributes support
  6160. */
  6161. static size_t ocfs2_xattr_user_list(struct dentry *dentry, char *list,
  6162. size_t list_size, const char *name,
  6163. size_t name_len, int type)
  6164. {
  6165. const size_t prefix_len = XATTR_USER_PREFIX_LEN;
  6166. const size_t total_len = prefix_len + name_len + 1;
  6167. struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
  6168. if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
  6169. return 0;
  6170. if (list && total_len <= list_size) {
  6171. memcpy(list, XATTR_USER_PREFIX, prefix_len);
  6172. memcpy(list + prefix_len, name, name_len);
  6173. list[prefix_len + name_len] = '\0';
  6174. }
  6175. return total_len;
  6176. }
  6177. static int ocfs2_xattr_user_get(struct dentry *dentry, const char *name,
  6178. void *buffer, size_t size, int type)
  6179. {
  6180. struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
  6181. if (strcmp(name, "") == 0)
  6182. return -EINVAL;
  6183. if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
  6184. return -EOPNOTSUPP;
  6185. return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_USER, name,
  6186. buffer, size);
  6187. }
  6188. static int ocfs2_xattr_user_set(struct dentry *dentry, const char *name,
  6189. const void *value, size_t size, int flags, int type)
  6190. {
  6191. struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
  6192. if (strcmp(name, "") == 0)
  6193. return -EINVAL;
  6194. if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
  6195. return -EOPNOTSUPP;
  6196. return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_USER,
  6197. name, value, size, flags);
  6198. }
/* VFS handler table for the "user." xattr namespace. */
struct xattr_handler ocfs2_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.list	= ocfs2_xattr_user_list,
	.get	= ocfs2_xattr_user_get,
	.set	= ocfs2_xattr_user_set,
};