
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * refcounttree.c
 *
 * Copyright (C) 2009 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/sort.h>
#define MLOG_MASK_PREFIX ML_REFCOUNT
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "inode.h"
#include "alloc.h"
#include "suballoc.h"
#include "journal.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "blockcheck.h"
#include "refcounttree.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "aops.h"
#include "xattr.h"
#include "namei.h"

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/namei.h>
#include <linux/mount.h>
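
/*
 * Bookkeeping carried through one copy-on-write (CoW) pass: the target
 * inode and cluster range, the data extent tree and refcount tree being
 * walked, the allocation/deallocation contexts, and the callbacks used
 * to look up cluster mappings and to duplicate clusters.
 */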
struct ocfs2_cow_context {
        struct inode *inode;
        u32 cow_start;
        u32 cow_len;
        struct ocfs2_extent_tree data_et;
        struct ocfs2_refcount_tree *ref_tree;
        struct buffer_head *ref_root_bh;
        struct ocfs2_alloc_context *meta_ac;
        struct ocfs2_alloc_context *data_ac;
        struct ocfs2_cached_dealloc_ctxt dealloc;
        void *cow_object;
        struct ocfs2_post_refcount *post_refcount;
        int extra_credits;
        int (*get_clusters)(struct ocfs2_cow_context *context,
                            u32 v_cluster, u32 *p_cluster,
                            u32 *num_clusters,
                            unsigned int *extent_flags);
        int (*cow_duplicate_clusters)(handle_t *handle,
                                      struct ocfs2_cow_context *context,
                                      u32 cpos, u32 old_cluster,
                                      u32 new_cluster, u32 new_len);
};

static inline struct ocfs2_refcount_tree *
cache_info_to_refcount(struct ocfs2_caching_info *ci)
{
        return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
}

static int ocfs2_validate_refcount_block(struct super_block *sb,
                                         struct buffer_head *bh)
{
        int rc;
        struct ocfs2_refcount_block *rb =
                (struct ocfs2_refcount_block *)bh->b_data;

        mlog(0, "Validating refcount block %llu\n",
             (unsigned long long)bh->b_blocknr);

        BUG_ON(!buffer_uptodate(bh));

        /*
         * If the ecc fails, we return the error but otherwise
         * leave the filesystem running. We know any error is
         * local to this block.
         */
        rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
        if (rc) {
                mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
                     (unsigned long long)bh->b_blocknr);
                return rc;
        }

        if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
                ocfs2_error(sb,
                            "Refcount block #%llu has bad signature %.*s",
                            (unsigned long long)bh->b_blocknr, 7,
                            rb->rf_signature);
                return -EINVAL;
        }

        if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
                ocfs2_error(sb,
                            "Refcount block #%llu has an invalid rf_blkno "
                            "of %llu",
                            (unsigned long long)bh->b_blocknr,
                            (unsigned long long)le64_to_cpu(rb->rf_blkno));
                return -EINVAL;
        }

        if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
                ocfs2_error(sb,
                            "Refcount block #%llu has an invalid "
                            "rf_fs_generation of #%u",
                            (unsigned long long)bh->b_blocknr,
                            le32_to_cpu(rb->rf_fs_generation));
                return -EINVAL;
        }

        return 0;
}

static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
                                     u64 rb_blkno,
                                     struct buffer_head **bh)
{
        int rc;
        struct buffer_head *tmp = *bh;

        rc = ocfs2_read_block(ci, rb_blkno, &tmp,
                              ocfs2_validate_refcount_block);

        /* If ocfs2_read_block() got us a new bh, pass it up. */
        if (!rc && !*bh)
                *bh = tmp;

        return rc;
}

static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
{
        struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

        return rf->rf_blkno;
}

static struct super_block *
ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
{
        struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

        return rf->rf_sb;
}

static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
{
        struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

        spin_lock(&rf->rf_lock);
}

static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
{
        struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

        spin_unlock(&rf->rf_lock);
}

static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
{
        struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

        mutex_lock(&rf->rf_io_mutex);
}

static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
{
        struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

        mutex_unlock(&rf->rf_io_mutex);
}

static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
        .co_owner        = ocfs2_refcount_cache_owner,
        .co_get_super    = ocfs2_refcount_cache_get_super,
        .co_cache_lock   = ocfs2_refcount_cache_lock,
        .co_cache_unlock = ocfs2_refcount_cache_unlock,
        .co_io_lock      = ocfs2_refcount_cache_io_lock,
        .co_io_unlock    = ocfs2_refcount_cache_io_unlock,
};
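
/*
 * In-memory refcount trees are tracked in an rbtree hanging off the
 * ocfs2_super, keyed by the block number of the tree root.  The caller
 * must hold osb->osb_lock while searching or modifying that rbtree.
 */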
static struct ocfs2_refcount_tree *
ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
{
        struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
        struct ocfs2_refcount_tree *tree = NULL;

        while (n) {
                tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);

                if (blkno < tree->rf_blkno)
                        n = n->rb_left;
                else if (blkno > tree->rf_blkno)
                        n = n->rb_right;
                else
                        return tree;
        }

        return NULL;
}

/* osb_lock is already locked. */
static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
                                       struct ocfs2_refcount_tree *new)
{
        u64 rf_blkno = new->rf_blkno;
        struct rb_node *parent = NULL;
        struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
        struct ocfs2_refcount_tree *tmp;

        while (*p) {
                parent = *p;
                tmp = rb_entry(parent, struct ocfs2_refcount_tree,
                               rf_node);

                if (rf_blkno < tmp->rf_blkno)
                        p = &(*p)->rb_left;
                else if (rf_blkno > tmp->rf_blkno)
                        p = &(*p)->rb_right;
                else {
                        /* This should never happen! */
                        mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
                             (unsigned long long)rf_blkno);
                        BUG();
                }
        }

        rb_link_node(&new->rf_node, parent, p);
        rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
}

static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
{
        ocfs2_metadata_cache_exit(&tree->rf_ci);
        ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
        ocfs2_lock_res_free(&tree->rf_lockres);
        kfree(tree);
}

static inline void
ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
                                        struct ocfs2_refcount_tree *tree)
{
        rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
        if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
                osb->osb_ref_tree_lru = NULL;
}

static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
                                        struct ocfs2_refcount_tree *tree)
{
        spin_lock(&osb->osb_lock);
        ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
        spin_unlock(&osb->osb_lock);
}

static void ocfs2_kref_remove_refcount_tree(struct kref *kref)
{
        struct ocfs2_refcount_tree *tree =
                container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);

        ocfs2_free_refcount_tree(tree);
}

static inline void
ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
{
        kref_get(&tree->rf_getcnt);
}

static inline void
ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
{
        kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
}

static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
                                               struct super_block *sb)
{
        ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
        mutex_init(&new->rf_io_mutex);
        new->rf_sb = sb;
        spin_lock_init(&new->rf_lock);
}

static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
                                        struct ocfs2_refcount_tree *new,
                                        u64 rf_blkno, u32 generation)
{
        init_rwsem(&new->rf_sem);
        ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
                                     rf_blkno, generation);
}

static struct ocfs2_refcount_tree*
ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
{
        struct ocfs2_refcount_tree *new;

        new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
        if (!new)
                return NULL;

        new->rf_blkno = rf_blkno;
        kref_init(&new->rf_getcnt);
        ocfs2_init_refcount_tree_ci(new, osb->sb);
        return new;
}
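
/*
 * Look up the in-memory refcount tree for rf_blkno, creating and
 * inserting one if it does not exist yet.  A one-entry LRU pointer
 * (osb_ref_tree_lru) short-circuits repeated lookups of the same tree.
 * Because the allocation happens with osb_lock dropped, the rbtree is
 * re-checked before insertion and the loser of the race is freed.
 */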
static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
                                   struct ocfs2_refcount_tree **ret_tree)
{
        int ret = 0;
        struct ocfs2_refcount_tree *tree, *new = NULL;
        struct buffer_head *ref_root_bh = NULL;
        struct ocfs2_refcount_block *ref_rb;

        spin_lock(&osb->osb_lock);
        if (osb->osb_ref_tree_lru &&
            osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
                tree = osb->osb_ref_tree_lru;
        else
                tree = ocfs2_find_refcount_tree(osb, rf_blkno);
        if (tree)
                goto out;

        spin_unlock(&osb->osb_lock);

        new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
        if (!new) {
                ret = -ENOMEM;
                mlog_errno(ret);
                return ret;
        }

        /*
         * We need the generation to create the refcount tree lock and since
         * it isn't changed during the tree modification, we are safe here to
         * read without protection.
         * We also have to purge the cache after we create the lock since the
         * refcount block may have stale data. It can only be trusted when
         * we hold the refcount lock.
         */
        ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
        if (ret) {
                mlog_errno(ret);
                ocfs2_metadata_cache_exit(&new->rf_ci);
                kfree(new);
                return ret;
        }

        ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
        new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
        ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
                                      new->rf_generation);
        ocfs2_metadata_cache_purge(&new->rf_ci);

        spin_lock(&osb->osb_lock);
        tree = ocfs2_find_refcount_tree(osb, rf_blkno);
        if (tree)
                goto out;

        ocfs2_insert_refcount_tree(osb, new);

        tree = new;
        new = NULL;

out:
        *ret_tree = tree;

        osb->osb_ref_tree_lru = tree;

        spin_unlock(&osb->osb_lock);

        if (new)
                ocfs2_free_refcount_tree(new);

        brelse(ref_root_bh);
        return ret;
}
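
/* Read the block number of the inode's refcount tree root from its dinode. */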
static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
{
        int ret;
        struct buffer_head *di_bh = NULL;
        struct ocfs2_dinode *di;

        ret = ocfs2_read_inode_block(inode, &di_bh);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));

        di = (struct ocfs2_dinode *)di_bh->b_data;
        *ref_blkno = le64_to_cpu(di->i_refcount_loc);
        brelse(di_bh);
out:
        return ret;
}
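
/*
 * Take the cluster (DLM) lock on the refcount tree, then the local
 * rw_semaphore that serializes readers and writers on this node.
 */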
static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
                                      struct ocfs2_refcount_tree *tree, int rw)
{
        int ret;

        ret = ocfs2_refcount_lock(tree, rw);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        if (rw)
                down_write(&tree->rf_sem);
        else
                down_read(&tree->rf_sem);

out:
        return ret;
}

/*
 * Lock the refcount tree pointed to by ref_blkno and return the tree.
 * In most cases, we lock the tree and read the refcount block.
 * So read it here if the caller really needs it.
 *
 * If the tree has been re-created by another node, it will free the
 * old one and re-create it.
 */
int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
                             u64 ref_blkno, int rw,
                             struct ocfs2_refcount_tree **ret_tree,
                             struct buffer_head **ref_bh)
{
        int ret, delete_tree = 0;
        struct ocfs2_refcount_tree *tree = NULL;
        struct buffer_head *ref_root_bh = NULL;
        struct ocfs2_refcount_block *rb;

again:
        ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
        if (ret) {
                mlog_errno(ret);
                return ret;
        }

        ocfs2_refcount_tree_get(tree);

        ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
        if (ret) {
                mlog_errno(ret);
                ocfs2_refcount_tree_put(tree);
                goto out;
        }

        ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
                                        &ref_root_bh);
        if (ret) {
                mlog_errno(ret);
                ocfs2_unlock_refcount_tree(osb, tree, rw);
                ocfs2_refcount_tree_put(tree);
                goto out;
        }

        rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
        /*
         * If the refcount block has been freed and re-created, we may need
         * to recreate the refcount tree also.
         *
         * Here we just remove the tree from the rb-tree, and the last
         * kref holder will unlock and delete this refcount_tree.
         * Then we goto "again" and ocfs2_get_refcount_tree will create
         * the new refcount tree for us.
         */
        if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
                if (!tree->rf_removed) {
                        ocfs2_erase_refcount_tree_from_list(osb, tree);
                        tree->rf_removed = 1;
                        delete_tree = 1;
                }

                ocfs2_unlock_refcount_tree(osb, tree, rw);
                /*
                 * We get an extra reference when we create the refcount
                 * tree, so another put will destroy it.
                 */
                if (delete_tree)
                        ocfs2_refcount_tree_put(tree);
                brelse(ref_root_bh);
                ref_root_bh = NULL;
                goto again;
        }

        *ret_tree = tree;
        if (ref_bh) {
                *ref_bh = ref_root_bh;
                ref_root_bh = NULL;
        }
out:
        brelse(ref_root_bh);
        return ret;
}
void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
                                struct ocfs2_refcount_tree *tree, int rw)
{
        if (rw)
                up_write(&tree->rf_sem);
        else
                up_read(&tree->rf_sem);

        ocfs2_refcount_unlock(tree, rw);
        ocfs2_refcount_tree_put(tree);
}

void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
{
        struct rb_node *node;
        struct ocfs2_refcount_tree *tree;
        struct rb_root *root = &osb->osb_rf_lock_tree;

        while ((node = rb_last(root)) != NULL) {
                tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);

                mlog(0, "Purge tree %llu\n",
                     (unsigned long long) tree->rf_blkno);

                rb_erase(&tree->rf_node, root);
                ocfs2_free_refcount_tree(tree);
        }
}
/*
 * Create a refcount tree for an inode.
 * We take for granted that the inode is already locked.
 */
static int ocfs2_create_refcount_tree(struct inode *inode,
                                      struct buffer_head *di_bh)
{
        int ret;
        handle_t *handle = NULL;
        struct ocfs2_alloc_context *meta_ac = NULL;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct buffer_head *new_bh = NULL;
        struct ocfs2_refcount_block *rb;
        struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
        u16 suballoc_bit_start;
        u32 num_got;
        u64 first_blkno;

        BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);

        mlog(0, "create tree for inode %lu\n", inode->i_ino);

        ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1,
                                   &suballoc_bit_start, &num_got,
                                   &first_blkno);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
        if (!new_tree) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out_commit;
        }

        new_bh = sb_getblk(inode->i_sb, first_blkno);
        if (!new_bh) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out_commit;
        }
        ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);

        ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
                                      OCFS2_JOURNAL_ACCESS_CREATE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        /* Initialize ocfs2_refcount_block. */
        rb = (struct ocfs2_refcount_block *)new_bh->b_data;
        memset(rb, 0, inode->i_sb->s_blocksize);
        strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
        rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
        rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
        rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
        rb->rf_blkno = cpu_to_le64(first_blkno);
        rb->rf_count = cpu_to_le32(1);
        rb->rf_records.rl_count =
                        cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
        spin_lock(&osb->osb_lock);
        rb->rf_generation = osb->s_next_generation++;
        spin_unlock(&osb->osb_lock);

        ocfs2_journal_dirty(handle, new_bh);

        spin_lock(&oi->ip_lock);
        oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
        di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
        di->i_refcount_loc = cpu_to_le64(first_blkno);
        spin_unlock(&oi->ip_lock);

        mlog(0, "created tree for inode %lu, refblock %llu\n",
             inode->i_ino, (unsigned long long)first_blkno);

        ocfs2_journal_dirty(handle, di_bh);

        /*
         * We have to init the tree lock here since it will use
         * the generation number to create it.
         */
        new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
        ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
                                      new_tree->rf_generation);

        spin_lock(&osb->osb_lock);
        tree = ocfs2_find_refcount_tree(osb, first_blkno);

        /*
         * We've just created a new refcount tree in this block. If
         * we found a refcount tree on the ocfs2_super, it must be
         * one we just deleted. We free the old tree before
         * inserting the new tree.
         */
        BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
        if (tree)
                ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
        ocfs2_insert_refcount_tree(osb, new_tree);
        spin_unlock(&osb->osb_lock);
        new_tree = NULL;
        if (tree)
                ocfs2_refcount_tree_put(tree);

out_commit:
        ocfs2_commit_trans(osb, handle);

out:
        if (new_tree) {
                ocfs2_metadata_cache_exit(&new_tree->rf_ci);
                kfree(new_tree);
        }

        brelse(new_bh);
        if (meta_ac)
                ocfs2_free_alloc_context(meta_ac);

        return ret;
}
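
/*
 * Attach an inode to an already existing refcount tree at refcount_loc:
 * bump rf_count on the tree root and record the tree's location and the
 * refcount feature flag in the dinode.
 */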
static int ocfs2_set_refcount_tree(struct inode *inode,
                                   struct buffer_head *di_bh,
                                   u64 refcount_loc)
{
        int ret;
        handle_t *handle = NULL;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct buffer_head *ref_root_bh = NULL;
        struct ocfs2_refcount_block *rb;
        struct ocfs2_refcount_tree *ref_tree;

        BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);

        ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
                                       &ref_tree, &ref_root_bh);
        if (ret) {
                mlog_errno(ret);
                return ret;
        }

        handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
        le32_add_cpu(&rb->rf_count, 1);
        ocfs2_journal_dirty(handle, ref_root_bh);

        spin_lock(&oi->ip_lock);
        oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
        di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
        di->i_refcount_loc = cpu_to_le64(refcount_loc);
        spin_unlock(&oi->ip_lock);
        ocfs2_journal_dirty(handle, di_bh);

out_commit:
        ocfs2_commit_trans(osb, handle);
out:
        ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
        brelse(ref_root_bh);

        return ret;
}
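
/*
 * Detach the inode from its refcount tree: clear the refcount flag and
 * i_refcount_loc in the dinode and drop rf_count on the tree root.  If
 * rf_count reaches zero we were the last user, so the root block is
 * freed back to its suballocator as well.
 */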
int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
{
        int ret, delete_tree = 0;
        handle_t *handle = NULL;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_refcount_block *rb;
        struct inode *alloc_inode = NULL;
        struct buffer_head *alloc_bh = NULL;
        struct buffer_head *blk_bh = NULL;
        struct ocfs2_refcount_tree *ref_tree;
        int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
        u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
        u16 bit = 0;

        if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL))
                return 0;

        BUG_ON(!ref_blkno);
        ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
        if (ret) {
                mlog_errno(ret);
                return ret;
        }

        rb = (struct ocfs2_refcount_block *)blk_bh->b_data;

        /*
         * If we are the last user, we need to free the block.
         * So lock the allocator ahead.
         */
        if (le32_to_cpu(rb->rf_count) == 1) {
                blk = le64_to_cpu(rb->rf_blkno);
                bit = le16_to_cpu(rb->rf_suballoc_bit);
                bg_blkno = ocfs2_which_suballoc_group(blk, bit);

                alloc_inode = ocfs2_get_system_file_inode(osb,
                                        EXTENT_ALLOC_SYSTEM_INODE,
                                        le16_to_cpu(rb->rf_suballoc_slot));
                if (!alloc_inode) {
                        ret = -ENOMEM;
                        mlog_errno(ret);
                        goto out;
                }
                mutex_lock(&alloc_inode->i_mutex);

                ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
                if (ret) {
                        mlog_errno(ret);
                        goto out_mutex;
                }

                credits += OCFS2_SUBALLOC_FREE;
        }

        handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out_unlock;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        spin_lock(&oi->ip_lock);
        oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
        di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
        di->i_refcount_loc = 0;
        spin_unlock(&oi->ip_lock);
        ocfs2_journal_dirty(handle, di_bh);

        le32_add_cpu(&rb->rf_count, -1);
        ocfs2_journal_dirty(handle, blk_bh);

        if (!rb->rf_count) {
                delete_tree = 1;
                ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
                ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
                                               alloc_bh, bit, bg_blkno, 1);
                if (ret)
                        mlog_errno(ret);
        }

out_commit:
        ocfs2_commit_trans(osb, handle);
out_unlock:
        if (alloc_inode) {
                ocfs2_inode_unlock(alloc_inode, 1);
                brelse(alloc_bh);
        }
out_mutex:
        if (alloc_inode) {
                mutex_unlock(&alloc_inode->i_mutex);
                iput(alloc_inode);
        }
out:
        ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
        if (delete_tree)
                ocfs2_refcount_tree_put(ref_tree);
        brelse(blk_bh);

        return ret;
}
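
/*
 * Scan the record list of a leaf (or inline root) refcount block for the
 * record containing cpos.  If cpos sits in a hole, fake a record with
 * r_refcount = 0 covering the hole up to the next real record or to len,
 * whichever comes first.
 */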
static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
                                          struct buffer_head *ref_leaf_bh,
                                          u64 cpos, unsigned int len,
                                          struct ocfs2_refcount_rec *ret_rec,
                                          int *index)
{
        int i = 0;
        struct ocfs2_refcount_block *rb =
                (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
        struct ocfs2_refcount_rec *rec = NULL;

        for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
                rec = &rb->rf_records.rl_recs[i];

                if (le64_to_cpu(rec->r_cpos) +
                    le32_to_cpu(rec->r_clusters) <= cpos)
                        continue;
                else if (le64_to_cpu(rec->r_cpos) > cpos)
                        break;

                /* ok, cpos falls in this rec. Just return. */
                if (ret_rec)
                        *ret_rec = *rec;
                goto out;
        }

        if (ret_rec) {
                /* We meet with a hole here, so fake the rec. */
                ret_rec->r_cpos = cpu_to_le64(cpos);
                ret_rec->r_refcount = 0;
                if (i < le16_to_cpu(rb->rf_records.rl_used) &&
                    le64_to_cpu(rec->r_cpos) < cpos + len)
                        ret_rec->r_clusters =
                                cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
                else
                        ret_rec->r_clusters = cpu_to_le32(len);
        }

out:
        *index = i;
}
/*
 * Try to remove the refcount tree. The mechanism is:
 * 1) Check whether i_clusters == 0; if not, exit.
 * 2) Check whether we have i_xattr_loc in the dinode; if yes, exit.
 * 3) Check whether we have inline xattr stored outside; if yes, exit.
 * 4) Remove the tree.
 */
int ocfs2_try_remove_refcount_tree(struct inode *inode,
                                   struct buffer_head *di_bh)
{
        int ret;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

        down_write(&oi->ip_xattr_sem);
        down_write(&oi->ip_alloc_sem);

        if (oi->ip_clusters)
                goto out;

        if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc)
                goto out;

        if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL &&
            ocfs2_has_inline_xattr_value_outside(inode, di))
                goto out;

        ret = ocfs2_remove_refcount_tree(inode, di_bh);
        if (ret)
                mlog_errno(ret);
out:
        up_write(&oi->ip_alloc_sem);
        up_write(&oi->ip_xattr_sem);
        return 0;
}
/*
 * Find the end range for a leaf refcount block indicated by
 * el->l_recs[index].e_blkno.
 */
static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
                                       struct buffer_head *ref_root_bh,
                                       struct ocfs2_extent_block *eb,
                                       struct ocfs2_extent_list *el,
                                       int index, u32 *cpos_end)
{
        int ret, i, subtree_root;
        u32 cpos;
        u64 blkno;
        struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
        struct ocfs2_path *left_path = NULL, *right_path = NULL;
        struct ocfs2_extent_tree et;
        struct ocfs2_extent_list *tmp_el;

        if (index < le16_to_cpu(el->l_next_free_rec) - 1) {
                /*
                 * We have an extent rec after index, so just use the e_cpos
                 * of the next extent rec.
                 */
                *cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos);
                return 0;
        }

        if (!eb || (eb && !eb->h_next_leaf_blk)) {
                /*
                 * We are the last extent rec, so any high cpos should
                 * be stored in this leaf refcount block.
                 */
                *cpos_end = UINT_MAX;
                return 0;
        }

        /*
         * If the extent block isn't the last one, we have to find
         * the subtree root between this extent block and the next
         * leaf extent block and get the corresponding e_cpos from
         * the subroot. Otherwise we may corrupt the b-tree.
         */
        ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);

        left_path = ocfs2_new_path_from_et(&et);
        if (!left_path) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos);
        ret = ocfs2_find_path(ci, left_path, cpos);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        right_path = ocfs2_new_path_from_path(left_path);
        if (!right_path) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_find_path(ci, right_path, cpos);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        subtree_root = ocfs2_find_subtree_root(&et, left_path,
                                               right_path);

        tmp_el = left_path->p_node[subtree_root].el;
        blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
        for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
                if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
                        *cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
                        break;
                }
        }

        BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));

out:
        ocfs2_free_path(left_path);
        ocfs2_free_path(right_path);
        return ret;
}
/*
 * Given a cpos and len, try to find the refcount record which contains cpos.
 * 1. If cpos can be found in one refcount record, return the record.
 * 2. If cpos can't be found, return a fake record which starts at cpos
 *    and ends at the smaller of cpos+len and the start of the next record.
 *    This fake record has r_refcount = 0.
 */
static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
                                  struct buffer_head *ref_root_bh,
                                  u64 cpos, unsigned int len,
                                  struct ocfs2_refcount_rec *ret_rec,
                                  int *index,
                                  struct buffer_head **ret_bh)
{
        int ret = 0, i, found;
        u32 low_cpos, uninitialized_var(cpos_end);
        struct ocfs2_extent_list *el;
        struct ocfs2_extent_rec *rec = NULL;
        struct ocfs2_extent_block *eb = NULL;
        struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
        struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
        struct ocfs2_refcount_block *rb =
                        (struct ocfs2_refcount_block *)ref_root_bh->b_data;

        if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
                ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
                                              ret_rec, index);
                *ret_bh = ref_root_bh;
                get_bh(ref_root_bh);
                return 0;
        }

        el = &rb->rf_list;
        low_cpos = cpos & OCFS2_32BIT_POS_MASK;

        if (el->l_tree_depth) {
                ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                eb = (struct ocfs2_extent_block *) eb_bh->b_data;
                el = &eb->h_list;

                if (el->l_tree_depth) {
                        ocfs2_error(sb,
                                    "refcount tree %llu has non-zero tree "
                                    "depth in leaf btree block %llu\n",
                                    (unsigned long long)ocfs2_metadata_cache_owner(ci),
                                    (unsigned long long)eb_bh->b_blocknr);
                        ret = -EROFS;
                        goto out;
                }
        }

        found = 0;
        for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
                rec = &el->l_recs[i];

                if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
                        found = 1;
                        break;
                }
        }

        if (found) {
                ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh,
                                                  eb, el, i, &cpos_end);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                if (cpos_end < low_cpos + len)
                        len = cpos_end - low_cpos;
        }

        ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
                                        &ref_leaf_bh);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
                                      ret_rec, index);
        *ret_bh = ref_leaf_bh;
out:
        brelse(eb_bh);
        return ret;
}
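
/*
 * Helpers for merging refcount records: two neighbouring records can be
 * coalesced when they are physically contiguous and carry the same
 * refcount.  The enum says which neighbour(s) of a given index qualify.
 */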
enum ocfs2_ref_rec_contig {
        REF_CONTIG_NONE = 0,
        REF_CONTIG_LEFT,
        REF_CONTIG_RIGHT,
        REF_CONTIG_LEFTRIGHT,
};

static enum ocfs2_ref_rec_contig
ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
                            int index)
{
        if ((rb->rf_records.rl_recs[index].r_refcount ==
             rb->rf_records.rl_recs[index + 1].r_refcount) &&
            (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
             le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
             le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
                return REF_CONTIG_RIGHT;

        return REF_CONTIG_NONE;
}

static enum ocfs2_ref_rec_contig
ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
                          int index)
{
        enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;

        if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
                ret = ocfs2_refcount_rec_adjacent(rb, index);

        if (index > 0) {
                enum ocfs2_ref_rec_contig tmp;

                tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);

                if (tmp == REF_CONTIG_RIGHT) {
                        if (ret == REF_CONTIG_RIGHT)
                                ret = REF_CONTIG_LEFTRIGHT;
                        else
                                ret = REF_CONTIG_LEFT;
                }
        }

        return ret;
}

static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
                                           int index)
{
        BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
               rb->rf_records.rl_recs[index+1].r_refcount);

        le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
                     le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));

        if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
                memmove(&rb->rf_records.rl_recs[index + 1],
                        &rb->rf_records.rl_recs[index + 2],
                        sizeof(struct ocfs2_refcount_rec) *
                        (le16_to_cpu(rb->rf_records.rl_used) - index - 2));

        memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
               0, sizeof(struct ocfs2_refcount_rec));
        le16_add_cpu(&rb->rf_records.rl_used, -1);
}

/*
 * Merge the refcount rec if we are contiguous with the adjacent recs.
 */
static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
                                     int index)
{
        enum ocfs2_ref_rec_contig contig =
                                ocfs2_refcount_rec_contig(rb, index);

        if (contig == REF_CONTIG_NONE)
                return;

        if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
                BUG_ON(index == 0);
                index--;
        }

        ocfs2_rotate_refcount_rec_left(rb, index);

        if (contig == REF_CONTIG_LEFTRIGHT)
                ocfs2_rotate_refcount_rec_left(rb, index);
}

/*
 * Change the refcount indexed by "index" in ref_bh.
 * If refcount reaches 0, remove it.
 */
static int ocfs2_change_refcount_rec(handle_t *handle,
                                     struct ocfs2_caching_info *ci,
                                     struct buffer_head *ref_leaf_bh,
                                     int index, int merge, int change)
{
        int ret;
        struct ocfs2_refcount_block *rb =
                        (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
        struct ocfs2_refcount_list *rl = &rb->rf_records;
        struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];

        ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        mlog(0, "change index %d, old count %u, change %d\n", index,
             le32_to_cpu(rec->r_refcount), change);
        le32_add_cpu(&rec->r_refcount, change);

        if (!rec->r_refcount) {
                if (index != le16_to_cpu(rl->rl_used) - 1) {
                        memmove(rec, rec + 1,
                                (le16_to_cpu(rl->rl_used) - index - 1) *
                                sizeof(struct ocfs2_refcount_rec));
                        memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
                               0, sizeof(struct ocfs2_refcount_rec));
                }

                le16_add_cpu(&rl->rl_used, -1);
        } else if (merge)
                ocfs2_refcount_rec_merge(rb, index);

        ocfs2_journal_dirty(handle, ref_leaf_bh);
out:
        return ret;
}
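
/*
 * Convert an inline refcount root (records stored directly in the root
 * block) into a tree: allocate a new leaf block, copy the root's contents
 * into it, and rewrite the root as an extent list pointing at that leaf.
 */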
static int ocfs2_expand_inline_ref_root(handle_t *handle,
                                        struct ocfs2_caching_info *ci,
                                        struct buffer_head *ref_root_bh,
                                        struct buffer_head **ref_leaf_bh,
                                        struct ocfs2_alloc_context *meta_ac)
{
        int ret;
        u16 suballoc_bit_start;
        u32 num_got;
        u64 blkno;
        struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
        struct buffer_head *new_bh = NULL;
        struct ocfs2_refcount_block *new_rb;
        struct ocfs2_refcount_block *root_rb =
                        (struct ocfs2_refcount_block *)ref_root_bh->b_data;

        ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1,
                                   &suballoc_bit_start, &num_got,
                                   &blkno);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        new_bh = sb_getblk(sb, blkno);
        if (new_bh == NULL) {
                ret = -EIO;
                mlog_errno(ret);
                goto out;
        }
        ocfs2_set_new_buffer_uptodate(ci, new_bh);

        ret = ocfs2_journal_access_rb(handle, ci, new_bh,
                                      OCFS2_JOURNAL_ACCESS_CREATE);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        /*
         * Initialize ocfs2_refcount_block.
         * It should contain the same information as the old root,
         * so just memcpy it and change the corresponding fields.
         */
        memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);

        new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
        new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
        new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
        new_rb->rf_blkno = cpu_to_le64(blkno);
        new_rb->rf_cpos = cpu_to_le32(0);
        new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
        new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
        ocfs2_journal_dirty(handle, new_bh);

        /* Now change the root. */
        memset(&root_rb->rf_list, 0, sb->s_blocksize -
               offsetof(struct ocfs2_refcount_block, rf_list));
        root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
        root_rb->rf_clusters = cpu_to_le32(1);
        root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
        root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
        root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
        root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);

        ocfs2_journal_dirty(handle, ref_root_bh);

        mlog(0, "new leaf block %llu, used %u\n", (unsigned long long)blkno,
             le16_to_cpu(new_rb->rf_records.rl_used));

        *ref_leaf_bh = new_bh;
        new_bh = NULL;
out:
        brelse(new_bh);
        return ret;
}
  1152. static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
  1153. struct ocfs2_refcount_rec *next)
  1154. {
  1155. if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
  1156. ocfs2_get_ref_rec_low_cpos(next))
  1157. return 1;
  1158. return 0;
  1159. }
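/*
 * Comparison and swap helpers handed to the kernel's sort() when a
 * leaf block is divided: one comparator orders records by the low
 * 32 bits of r_cpos (to pick a split point), the other by the full
 * 64-bit r_cpos (to restore the normal ordering afterwards).
 */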
  1160. static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
  1161. {
  1162. const struct ocfs2_refcount_rec *l = a, *r = b;
  1163. u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
  1164. u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);
  1165. if (l_cpos > r_cpos)
  1166. return 1;
  1167. if (l_cpos < r_cpos)
  1168. return -1;
  1169. return 0;
  1170. }
  1171. static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
  1172. {
  1173. const struct ocfs2_refcount_rec *l = a, *r = b;
  1174. u64 l_cpos = le64_to_cpu(l->r_cpos);
  1175. u64 r_cpos = le64_to_cpu(r->r_cpos);
  1176. if (l_cpos > r_cpos)
  1177. return 1;
  1178. if (l_cpos < r_cpos)
  1179. return -1;
  1180. return 0;
  1181. }
  1182. static void swap_refcount_rec(void *a, void *b, int size)
  1183. {
  1184. struct ocfs2_refcount_rec *l = a, *r = b, tmp;
  1185. tmp = *(struct ocfs2_refcount_rec *)l;
  1186. *(struct ocfs2_refcount_rec *)l =
  1187. *(struct ocfs2_refcount_rec *)r;
  1188. *(struct ocfs2_refcount_rec *)r = tmp;
  1189. }
/*
 * The refcount records are ordered by their 64-bit cpos,
 * but we will use the low 32 bits as the e_cpos in the b-tree.
 * So we need to make sure that the split position doesn't intersect
 * with any record.
 *
 * Note: the refcount block is already sorted by its low 32-bit cpos,
 * so just try the middle position first; we will exit as soon as we
 * find a good position.
 */
  1199. static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
  1200. u32 *split_pos, int *split_index)
  1201. {
  1202. int num_used = le16_to_cpu(rl->rl_used);
  1203. int delta, middle = num_used / 2;
  1204. for (delta = 0; delta < middle; delta++) {
  1205. /* Let's check delta earlier than middle */
  1206. if (ocfs2_refcount_rec_no_intersect(
  1207. &rl->rl_recs[middle - delta - 1],
  1208. &rl->rl_recs[middle - delta])) {
  1209. *split_index = middle - delta;
  1210. break;
  1211. }
  1212. /* For even counts, don't walk off the end */
  1213. if ((middle + delta + 1) == num_used)
  1214. continue;
  1215. /* Now try delta past middle */
  1216. if (ocfs2_refcount_rec_no_intersect(
  1217. &rl->rl_recs[middle + delta],
  1218. &rl->rl_recs[middle + delta + 1])) {
  1219. *split_index = middle + delta + 1;
  1220. break;
  1221. }
  1222. }
  1223. if (delta >= middle)
  1224. return -ENOSPC;
  1225. *split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
  1226. return 0;
  1227. }
  1228. static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
  1229. struct buffer_head *new_bh,
  1230. u32 *split_cpos)
  1231. {
  1232. int split_index = 0, num_moved, ret;
  1233. u32 cpos = 0;
  1234. struct ocfs2_refcount_block *rb =
  1235. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1236. struct ocfs2_refcount_list *rl = &rb->rf_records;
  1237. struct ocfs2_refcount_block *new_rb =
  1238. (struct ocfs2_refcount_block *)new_bh->b_data;
  1239. struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;
  1240. mlog(0, "split old leaf refcount block %llu, count = %u, used = %u\n",
  1241. (unsigned long long)ref_leaf_bh->b_blocknr,
le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
/*
 * XXX: Improvement later.
 * If we know all the high 32 bits of the cpos are the same, there is
 * no need to sort.
 *
 * In order to make the whole process safe, we do:
 * 1. sort the entries by their low 32-bit cpos first so that we can
 *    find the split cpos easily.
 * 2. call ocfs2_insert_extent to insert the new refcount block.
 * 3. move the refcount recs to the new block.
 * 4. sort the entries by their 64-bit cpos.
 * 5. dirty the new_rb and rb.
 */
  1255. sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
  1256. sizeof(struct ocfs2_refcount_rec),
  1257. cmp_refcount_rec_by_low_cpos, swap_refcount_rec);
  1258. ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
  1259. if (ret) {
  1260. mlog_errno(ret);
  1261. return ret;
  1262. }
  1263. new_rb->rf_cpos = cpu_to_le32(cpos);
  1264. /* move refcount records starting from split_index to the new block. */
  1265. num_moved = le16_to_cpu(rl->rl_used) - split_index;
  1266. memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
  1267. num_moved * sizeof(struct ocfs2_refcount_rec));
/* OK, remove the entries we just moved over to the other block. */
  1269. memset(&rl->rl_recs[split_index], 0,
  1270. num_moved * sizeof(struct ocfs2_refcount_rec));
  1271. /* change old and new rl_used accordingly. */
  1272. le16_add_cpu(&rl->rl_used, -num_moved);
  1273. new_rl->rl_used = cpu_to_le16(num_moved);
  1274. sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
  1275. sizeof(struct ocfs2_refcount_rec),
  1276. cmp_refcount_rec_by_cpos, swap_refcount_rec);
  1277. sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
  1278. sizeof(struct ocfs2_refcount_rec),
  1279. cmp_refcount_rec_by_cpos, swap_refcount_rec);
  1280. *split_cpos = cpos;
  1281. return 0;
  1282. }
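/*
 * Allocate and initialize a brand-new leaf refcount block, move
 * roughly half of ref_leaf_bh's records into it via
 * ocfs2_divide_leaf_refcount_block(), and insert the new leaf into
 * the refcount b-tree at the chosen split cpos.
 */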
  1283. static int ocfs2_new_leaf_refcount_block(handle_t *handle,
  1284. struct ocfs2_caching_info *ci,
  1285. struct buffer_head *ref_root_bh,
  1286. struct buffer_head *ref_leaf_bh,
  1287. struct ocfs2_alloc_context *meta_ac)
  1288. {
  1289. int ret;
  1290. u16 suballoc_bit_start;
  1291. u32 num_got, new_cpos;
  1292. u64 blkno;
  1293. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1294. struct ocfs2_refcount_block *root_rb =
  1295. (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1296. struct buffer_head *new_bh = NULL;
  1297. struct ocfs2_refcount_block *new_rb;
  1298. struct ocfs2_extent_tree ref_et;
  1299. BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));
  1300. ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
  1301. OCFS2_JOURNAL_ACCESS_WRITE);
  1302. if (ret) {
  1303. mlog_errno(ret);
  1304. goto out;
  1305. }
  1306. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1307. OCFS2_JOURNAL_ACCESS_WRITE);
  1308. if (ret) {
  1309. mlog_errno(ret);
  1310. goto out;
  1311. }
  1312. ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1,
  1313. &suballoc_bit_start, &num_got,
  1314. &blkno);
  1315. if (ret) {
  1316. mlog_errno(ret);
  1317. goto out;
  1318. }
  1319. new_bh = sb_getblk(sb, blkno);
  1320. if (new_bh == NULL) {
  1321. ret = -EIO;
  1322. mlog_errno(ret);
  1323. goto out;
  1324. }
  1325. ocfs2_set_new_buffer_uptodate(ci, new_bh);
  1326. ret = ocfs2_journal_access_rb(handle, ci, new_bh,
  1327. OCFS2_JOURNAL_ACCESS_CREATE);
  1328. if (ret) {
  1329. mlog_errno(ret);
  1330. goto out;
  1331. }
  1332. /* Initialize ocfs2_refcount_block. */
  1333. new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
  1334. memset(new_rb, 0, sb->s_blocksize);
  1335. strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
  1336. new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
  1337. new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
  1338. new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
  1339. new_rb->rf_blkno = cpu_to_le64(blkno);
  1340. new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
  1341. new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
  1342. new_rb->rf_records.rl_count =
  1343. cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
  1344. new_rb->rf_generation = root_rb->rf_generation;
  1345. ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
  1346. if (ret) {
  1347. mlog_errno(ret);
  1348. goto out;
  1349. }
  1350. ocfs2_journal_dirty(handle, ref_leaf_bh);
  1351. ocfs2_journal_dirty(handle, new_bh);
  1352. ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);
  1353. mlog(0, "insert new leaf block %llu at %u\n",
  1354. (unsigned long long)new_bh->b_blocknr, new_cpos);
  1355. /* Insert the new leaf block with the specific offset cpos. */
  1356. ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
  1357. 1, 0, meta_ac);
  1358. if (ret)
  1359. mlog_errno(ret);
  1360. out:
  1361. brelse(new_bh);
  1362. return ret;
  1363. }
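/*
 * Make room for more refcount records: if the root is still inline,
 * convert it to a tree first, then add one more leaf block by
 * splitting the full leaf.
 */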
  1364. static int ocfs2_expand_refcount_tree(handle_t *handle,
  1365. struct ocfs2_caching_info *ci,
  1366. struct buffer_head *ref_root_bh,
  1367. struct buffer_head *ref_leaf_bh,
  1368. struct ocfs2_alloc_context *meta_ac)
  1369. {
  1370. int ret;
  1371. struct buffer_head *expand_bh = NULL;
  1372. if (ref_root_bh == ref_leaf_bh) {
  1373. /*
  1374. * the old root bh hasn't been expanded to a b-tree,
  1375. * so expand it first.
  1376. */
  1377. ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
  1378. &expand_bh, meta_ac);
  1379. if (ret) {
  1380. mlog_errno(ret);
  1381. goto out;
  1382. }
  1383. } else {
  1384. expand_bh = ref_leaf_bh;
  1385. get_bh(expand_bh);
  1386. }
/* Now add a new refcount block into the tree. */
  1388. ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
  1389. expand_bh, meta_ac);
  1390. if (ret)
  1391. mlog_errno(ret);
  1392. out:
  1393. brelse(expand_bh);
  1394. return ret;
  1395. }
  1396. /*
  1397. * Adjust the extent rec in b-tree representing ref_leaf_bh.
  1398. *
  1399. * Only called when we have inserted a new refcount rec at index 0
  1400. * which means ocfs2_extent_rec.e_cpos may need some change.
  1401. */
  1402. static int ocfs2_adjust_refcount_rec(handle_t *handle,
  1403. struct ocfs2_caching_info *ci,
  1404. struct buffer_head *ref_root_bh,
  1405. struct buffer_head *ref_leaf_bh,
  1406. struct ocfs2_refcount_rec *rec)
  1407. {
  1408. int ret = 0, i;
  1409. u32 new_cpos, old_cpos;
  1410. struct ocfs2_path *path = NULL;
  1411. struct ocfs2_extent_tree et;
  1412. struct ocfs2_refcount_block *rb =
  1413. (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1414. struct ocfs2_extent_list *el;
  1415. if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
  1416. goto out;
  1417. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1418. old_cpos = le32_to_cpu(rb->rf_cpos);
  1419. new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
  1420. if (old_cpos <= new_cpos)
  1421. goto out;
  1422. ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
  1423. path = ocfs2_new_path_from_et(&et);
  1424. if (!path) {
  1425. ret = -ENOMEM;
  1426. mlog_errno(ret);
  1427. goto out;
  1428. }
  1429. ret = ocfs2_find_path(ci, path, old_cpos);
  1430. if (ret) {
  1431. mlog_errno(ret);
  1432. goto out;
  1433. }
/*
 * 2 more credits: one for the leaf refcount block, one for
 * the extent block that contains the extent rec.
 */
  1438. ret = ocfs2_extend_trans(handle, 2);
  1439. if (ret < 0) {
  1440. mlog_errno(ret);
  1441. goto out;
  1442. }
  1443. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1444. OCFS2_JOURNAL_ACCESS_WRITE);
  1445. if (ret < 0) {
  1446. mlog_errno(ret);
  1447. goto out;
  1448. }
  1449. ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
  1450. OCFS2_JOURNAL_ACCESS_WRITE);
  1451. if (ret < 0) {
  1452. mlog_errno(ret);
  1453. goto out;
  1454. }
  1455. /* change the leaf extent block first. */
  1456. el = path_leaf_el(path);
  1457. for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
  1458. if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
  1459. break;
  1460. BUG_ON(i == le16_to_cpu(el->l_next_free_rec));
  1461. el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
  1462. /* change the r_cpos in the leaf block. */
  1463. rb->rf_cpos = cpu_to_le32(new_cpos);
  1464. ocfs2_journal_dirty(handle, path_leaf_bh(path));
  1465. ocfs2_journal_dirty(handle, ref_leaf_bh);
  1466. out:
  1467. ocfs2_free_path(path);
  1468. return ret;
  1469. }
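/*
 * Insert a new refcount rec at "index" in ref_leaf_bh, expanding the
 * tree first if the leaf is full. If the rec lands at index 0, the
 * extent rec describing this leaf may need its e_cpos adjusted via
 * ocfs2_adjust_refcount_rec().
 */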
  1470. static int ocfs2_insert_refcount_rec(handle_t *handle,
  1471. struct ocfs2_caching_info *ci,
  1472. struct buffer_head *ref_root_bh,
  1473. struct buffer_head *ref_leaf_bh,
  1474. struct ocfs2_refcount_rec *rec,
  1475. int index, int merge,
  1476. struct ocfs2_alloc_context *meta_ac)
  1477. {
  1478. int ret;
  1479. struct ocfs2_refcount_block *rb =
  1480. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1481. struct ocfs2_refcount_list *rf_list = &rb->rf_records;
  1482. struct buffer_head *new_bh = NULL;
  1483. BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
  1484. if (rf_list->rl_used == rf_list->rl_count) {
  1485. u64 cpos = le64_to_cpu(rec->r_cpos);
  1486. u32 len = le32_to_cpu(rec->r_clusters);
  1487. ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
  1488. ref_leaf_bh, meta_ac);
  1489. if (ret) {
  1490. mlog_errno(ret);
  1491. goto out;
  1492. }
  1493. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1494. cpos, len, NULL, &index,
  1495. &new_bh);
  1496. if (ret) {
  1497. mlog_errno(ret);
  1498. goto out;
  1499. }
  1500. ref_leaf_bh = new_bh;
  1501. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1502. rf_list = &rb->rf_records;
  1503. }
  1504. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1505. OCFS2_JOURNAL_ACCESS_WRITE);
  1506. if (ret) {
  1507. mlog_errno(ret);
  1508. goto out;
  1509. }
  1510. if (index < le16_to_cpu(rf_list->rl_used))
  1511. memmove(&rf_list->rl_recs[index + 1],
  1512. &rf_list->rl_recs[index],
  1513. (le16_to_cpu(rf_list->rl_used) - index) *
  1514. sizeof(struct ocfs2_refcount_rec));
  1515. mlog(0, "insert refcount record start %llu, len %u, count %u "
  1516. "to leaf block %llu at index %d\n",
  1517. (unsigned long long)le64_to_cpu(rec->r_cpos),
  1518. le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount),
  1519. (unsigned long long)ref_leaf_bh->b_blocknr, index);
  1520. rf_list->rl_recs[index] = *rec;
  1521. le16_add_cpu(&rf_list->rl_used, 1);
  1522. if (merge)
  1523. ocfs2_refcount_rec_merge(rb, index);
  1524. ocfs2_journal_dirty(handle, ref_leaf_bh);
  1525. if (index == 0) {
  1526. ret = ocfs2_adjust_refcount_rec(handle, ci,
  1527. ref_root_bh,
  1528. ref_leaf_bh, rec);
  1529. if (ret)
  1530. mlog_errno(ret);
  1531. }
  1532. out:
  1533. brelse(new_bh);
  1534. return ret;
  1535. }
/*
 * Split the refcount_rec indexed by "index" in ref_leaf_bh.
 * This is much simpler than our b-tree code.
 * split_rec is the new refcount rec we want to insert.
 * If split_rec->r_refcount > 0, we are changing the refcount (in case we
 * increase the refcount or decrease a refcount to a non-zero value).
 * If split_rec->r_refcount == 0, we are punching a hole in the current
 * refcount rec (in case we decrease a refcount to zero).
 */
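/*
 * A rough sketch of the middle-split case with made-up numbers: an
 * orig_rec covering cpos 100 for 10 clusters with refcount 2, split
 * by a split_rec at cpos 103 for 4 clusters, ends up as three recs:
 * (100, 3, count 2), (103, 4, split_rec->r_refcount) and
 * (107, 3, count 2). That is why recs_need below is 0, 1 or 2.
 */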
  1545. static int ocfs2_split_refcount_rec(handle_t *handle,
  1546. struct ocfs2_caching_info *ci,
  1547. struct buffer_head *ref_root_bh,
  1548. struct buffer_head *ref_leaf_bh,
  1549. struct ocfs2_refcount_rec *split_rec,
  1550. int index, int merge,
  1551. struct ocfs2_alloc_context *meta_ac,
  1552. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1553. {
  1554. int ret, recs_need;
  1555. u32 len;
  1556. struct ocfs2_refcount_block *rb =
  1557. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1558. struct ocfs2_refcount_list *rf_list = &rb->rf_records;
  1559. struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
  1560. struct ocfs2_refcount_rec *tail_rec = NULL;
  1561. struct buffer_head *new_bh = NULL;
  1562. BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
  1563. mlog(0, "original r_pos %llu, cluster %u, split %llu, cluster %u\n",
  1564. le64_to_cpu(orig_rec->r_cpos), le32_to_cpu(orig_rec->r_clusters),
  1565. le64_to_cpu(split_rec->r_cpos),
  1566. le32_to_cpu(split_rec->r_clusters));
/*
 * If we just need to split off the head or tail clusters,
 * no more recs are needed; the split alone is OK.
 * Otherwise we need at least one new rec.
 */
  1572. if (!split_rec->r_refcount &&
  1573. (split_rec->r_cpos == orig_rec->r_cpos ||
  1574. le64_to_cpu(split_rec->r_cpos) +
  1575. le32_to_cpu(split_rec->r_clusters) ==
  1576. le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
  1577. recs_need = 0;
  1578. else
  1579. recs_need = 1;
/*
 * We need one more rec if we split in the middle and the new rec has
 * a non-zero refcount.
 */
  1584. if (split_rec->r_refcount &&
  1585. (split_rec->r_cpos != orig_rec->r_cpos &&
  1586. le64_to_cpu(split_rec->r_cpos) +
  1587. le32_to_cpu(split_rec->r_clusters) !=
  1588. le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
  1589. recs_need++;
/* If the leaf block doesn't have enough room for the new records, expand it. */
  1591. if (le16_to_cpu(rf_list->rl_used) + recs_need >
  1592. le16_to_cpu(rf_list->rl_count)) {
  1593. struct ocfs2_refcount_rec tmp_rec;
  1594. u64 cpos = le64_to_cpu(orig_rec->r_cpos);
  1595. len = le32_to_cpu(orig_rec->r_clusters);
  1596. ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
  1597. ref_leaf_bh, meta_ac);
  1598. if (ret) {
  1599. mlog_errno(ret);
  1600. goto out;
  1601. }
  1602. /*
  1603. * We have to re-get it since now cpos may be moved to
  1604. * another leaf block.
  1605. */
  1606. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1607. cpos, len, &tmp_rec, &index,
  1608. &new_bh);
  1609. if (ret) {
  1610. mlog_errno(ret);
  1611. goto out;
  1612. }
  1613. ref_leaf_bh = new_bh;
  1614. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1615. rf_list = &rb->rf_records;
  1616. orig_rec = &rf_list->rl_recs[index];
  1617. }
  1618. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1619. OCFS2_JOURNAL_ACCESS_WRITE);
  1620. if (ret) {
  1621. mlog_errno(ret);
  1622. goto out;
  1623. }
/*
 * We have calculated how many new records we need and stored the
 * number in recs_need, so make enough room first by moving the
 * records after "index" toward the end.
 */
  1629. if (index != le16_to_cpu(rf_list->rl_used) - 1)
  1630. memmove(&rf_list->rl_recs[index + 1 + recs_need],
  1631. &rf_list->rl_recs[index + 1],
  1632. (le16_to_cpu(rf_list->rl_used) - index - 1) *
  1633. sizeof(struct ocfs2_refcount_rec));
  1634. len = (le64_to_cpu(orig_rec->r_cpos) +
  1635. le32_to_cpu(orig_rec->r_clusters)) -
  1636. (le64_to_cpu(split_rec->r_cpos) +
  1637. le32_to_cpu(split_rec->r_clusters));
/*
 * If we have "len", then we will split off the tail and move it
 * to the end of the space we have just made.
 */
  1642. if (len) {
  1643. tail_rec = &rf_list->rl_recs[index + recs_need];
  1644. memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
  1645. le64_add_cpu(&tail_rec->r_cpos,
  1646. le32_to_cpu(tail_rec->r_clusters) - len);
  1647. tail_rec->r_clusters = cpu_to_le32(len);
  1648. }
/*
 * If the split pos isn't the same as the original one, we need to
 * split off the head.
 *
 * Note: it can happen that split_rec.r_refcount = 0, recs_need = 0
 * and len > 0, which means we have just cut the head off orig_rec.
 * In that case orig_rec has already been modified above, so the
 * r_cpos comparison alone is misleading; that is why we also check
 * tail_rec != orig_rec.
 */
  1658. if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
  1659. len = le64_to_cpu(split_rec->r_cpos) -
  1660. le64_to_cpu(orig_rec->r_cpos);
  1661. orig_rec->r_clusters = cpu_to_le32(len);
  1662. index++;
  1663. }
  1664. le16_add_cpu(&rf_list->rl_used, recs_need);
  1665. if (split_rec->r_refcount) {
  1666. rf_list->rl_recs[index] = *split_rec;
  1667. mlog(0, "insert refcount record start %llu, len %u, count %u "
  1668. "to leaf block %llu at index %d\n",
  1669. (unsigned long long)le64_to_cpu(split_rec->r_cpos),
  1670. le32_to_cpu(split_rec->r_clusters),
  1671. le32_to_cpu(split_rec->r_refcount),
  1672. (unsigned long long)ref_leaf_bh->b_blocknr, index);
  1673. if (merge)
  1674. ocfs2_refcount_rec_merge(rb, index);
  1675. }
  1676. ocfs2_journal_dirty(handle, ref_leaf_bh);
  1677. out:
  1678. brelse(new_bh);
  1679. return ret;
  1680. }
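/*
 * Add 1 to the refcount of every cluster in [cpos, cpos + len).
 * Each loop iteration looks up the rec (or hole) covering cpos and
 * either bumps an exactly-matching rec, inserts a new rec with
 * refcount 1 into a hole, or splits a partially-overlapping rec.
 */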
  1681. static int __ocfs2_increase_refcount(handle_t *handle,
  1682. struct ocfs2_caching_info *ci,
  1683. struct buffer_head *ref_root_bh,
  1684. u64 cpos, u32 len, int merge,
  1685. struct ocfs2_alloc_context *meta_ac,
  1686. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1687. {
  1688. int ret = 0, index;
  1689. struct buffer_head *ref_leaf_bh = NULL;
  1690. struct ocfs2_refcount_rec rec;
  1691. unsigned int set_len = 0;
  1692. mlog(0, "Tree owner %llu, add refcount start %llu, len %u\n",
  1693. (unsigned long long)ocfs2_metadata_cache_owner(ci),
  1694. (unsigned long long)cpos, len);
  1695. while (len) {
  1696. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1697. cpos, len, &rec, &index,
  1698. &ref_leaf_bh);
  1699. if (ret) {
  1700. mlog_errno(ret);
  1701. goto out;
  1702. }
  1703. set_len = le32_to_cpu(rec.r_clusters);
/*
 * Here we may meet with 3 situations:
 *
 * 1. If we find an existing record whose length matches, we just
 *    need to increase its r_refcount and we are done.
 * 2. If we find a hole, just insert a rec with r_refcount = 1.
 * 3. If we land in the middle of an existing record, split it.
 */
  1714. if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
  1715. set_len <= len) {
  1716. mlog(0, "increase refcount rec, start %llu, len %u, "
  1717. "count %u\n", (unsigned long long)cpos, set_len,
  1718. le32_to_cpu(rec.r_refcount));
  1719. ret = ocfs2_change_refcount_rec(handle, ci,
  1720. ref_leaf_bh, index,
  1721. merge, 1);
  1722. if (ret) {
  1723. mlog_errno(ret);
  1724. goto out;
  1725. }
  1726. } else if (!rec.r_refcount) {
  1727. rec.r_refcount = cpu_to_le32(1);
  1728. mlog(0, "insert refcount rec, start %llu, len %u\n",
  1729. (unsigned long long)le64_to_cpu(rec.r_cpos),
  1730. set_len);
  1731. ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
  1732. ref_leaf_bh,
  1733. &rec, index,
  1734. merge, meta_ac);
  1735. if (ret) {
  1736. mlog_errno(ret);
  1737. goto out;
  1738. }
  1739. } else {
  1740. set_len = min((u64)(cpos + len),
  1741. le64_to_cpu(rec.r_cpos) + set_len) - cpos;
  1742. rec.r_cpos = cpu_to_le64(cpos);
  1743. rec.r_clusters = cpu_to_le32(set_len);
  1744. le32_add_cpu(&rec.r_refcount, 1);
  1745. mlog(0, "split refcount rec, start %llu, "
  1746. "len %u, count %u\n",
  1747. (unsigned long long)le64_to_cpu(rec.r_cpos),
  1748. set_len, le32_to_cpu(rec.r_refcount));
  1749. ret = ocfs2_split_refcount_rec(handle, ci,
  1750. ref_root_bh, ref_leaf_bh,
  1751. &rec, index, merge,
  1752. meta_ac, dealloc);
  1753. if (ret) {
  1754. mlog_errno(ret);
  1755. goto out;
  1756. }
  1757. }
  1758. cpos += set_len;
  1759. len -= set_len;
  1760. brelse(ref_leaf_bh);
  1761. ref_leaf_bh = NULL;
  1762. }
  1763. out:
  1764. brelse(ref_leaf_bh);
  1765. return ret;
  1766. }
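/*
 * Remove a now-empty leaf refcount block from the refcount b-tree
 * and queue the block itself for deallocation. If the root ends up
 * with no leaf extents at all, it is reset to an inline record block.
 */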
  1767. static int ocfs2_remove_refcount_extent(handle_t *handle,
  1768. struct ocfs2_caching_info *ci,
  1769. struct buffer_head *ref_root_bh,
  1770. struct buffer_head *ref_leaf_bh,
  1771. struct ocfs2_alloc_context *meta_ac,
  1772. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1773. {
  1774. int ret;
  1775. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1776. struct ocfs2_refcount_block *rb =
  1777. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1778. struct ocfs2_extent_tree et;
  1779. BUG_ON(rb->rf_records.rl_used);
  1780. ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
  1781. ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
  1782. 1, meta_ac, dealloc);
  1783. if (ret) {
  1784. mlog_errno(ret);
  1785. goto out;
  1786. }
  1787. ocfs2_remove_from_cache(ci, ref_leaf_bh);
  1788. /*
  1789. * add the freed block to the dealloc so that it will be freed
  1790. * when we run dealloc.
  1791. */
  1792. ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
  1793. le16_to_cpu(rb->rf_suballoc_slot),
  1794. le64_to_cpu(rb->rf_blkno),
  1795. le16_to_cpu(rb->rf_suballoc_bit));
  1796. if (ret) {
  1797. mlog_errno(ret);
  1798. goto out;
  1799. }
  1800. ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
  1801. OCFS2_JOURNAL_ACCESS_WRITE);
  1802. if (ret) {
  1803. mlog_errno(ret);
  1804. goto out;
  1805. }
  1806. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1807. le32_add_cpu(&rb->rf_clusters, -1);
/*
 * Check whether we need to restore the root refcount block if
 * there is no leaf extent block at all.
 */
  1812. if (!rb->rf_list.l_next_free_rec) {
  1813. BUG_ON(rb->rf_clusters);
  1814. mlog(0, "reset refcount tree root %llu to be a record block.\n",
  1815. (unsigned long long)ref_root_bh->b_blocknr);
  1816. rb->rf_flags = 0;
  1817. rb->rf_parent = 0;
  1818. rb->rf_cpos = 0;
  1819. memset(&rb->rf_records, 0, sb->s_blocksize -
  1820. offsetof(struct ocfs2_refcount_block, rf_records));
  1821. rb->rf_records.rl_count =
  1822. cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
  1823. }
  1824. ocfs2_journal_dirty(handle, ref_root_bh);
  1825. out:
  1826. return ret;
  1827. }
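/* Public wrapper around __ocfs2_increase_refcount() with merging enabled. */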
  1828. int ocfs2_increase_refcount(handle_t *handle,
  1829. struct ocfs2_caching_info *ci,
  1830. struct buffer_head *ref_root_bh,
  1831. u64 cpos, u32 len,
  1832. struct ocfs2_alloc_context *meta_ac,
  1833. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1834. {
  1835. return __ocfs2_increase_refcount(handle, ci, ref_root_bh,
  1836. cpos, len, 1,
  1837. meta_ac, dealloc);
  1838. }
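/*
 * Decrease the refcount of [cpos, cpos + len), which must lie inside
 * the rec at "index". Either the whole rec is decremented in place or
 * it is split so that only the requested range is changed; a leaf
 * that becomes empty is removed from the tree.
 */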
  1839. static int ocfs2_decrease_refcount_rec(handle_t *handle,
  1840. struct ocfs2_caching_info *ci,
  1841. struct buffer_head *ref_root_bh,
  1842. struct buffer_head *ref_leaf_bh,
  1843. int index, u64 cpos, unsigned int len,
  1844. struct ocfs2_alloc_context *meta_ac,
  1845. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1846. {
  1847. int ret;
  1848. struct ocfs2_refcount_block *rb =
  1849. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1850. struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];
  1851. BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
  1852. BUG_ON(cpos + len >
  1853. le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));
  1854. if (cpos == le64_to_cpu(rec->r_cpos) &&
  1855. len == le32_to_cpu(rec->r_clusters))
  1856. ret = ocfs2_change_refcount_rec(handle, ci,
  1857. ref_leaf_bh, index, 1, -1);
  1858. else {
  1859. struct ocfs2_refcount_rec split = *rec;
  1860. split.r_cpos = cpu_to_le64(cpos);
  1861. split.r_clusters = cpu_to_le32(len);
  1862. le32_add_cpu(&split.r_refcount, -1);
  1863. mlog(0, "split refcount rec, start %llu, "
  1864. "len %u, count %u, original start %llu, len %u\n",
  1865. (unsigned long long)le64_to_cpu(split.r_cpos),
  1866. len, le32_to_cpu(split.r_refcount),
  1867. (unsigned long long)le64_to_cpu(rec->r_cpos),
  1868. le32_to_cpu(rec->r_clusters));
  1869. ret = ocfs2_split_refcount_rec(handle, ci,
  1870. ref_root_bh, ref_leaf_bh,
  1871. &split, index, 1,
  1872. meta_ac, dealloc);
  1873. }
  1874. if (ret) {
  1875. mlog_errno(ret);
  1876. goto out;
  1877. }
  1878. /* Remove the leaf refcount block if it contains no refcount record. */
  1879. if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
  1880. ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh,
  1881. ref_leaf_bh, meta_ac,
  1882. dealloc);
  1883. if (ret)
  1884. mlog_errno(ret);
  1885. }
  1886. out:
  1887. return ret;
  1888. }
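/*
 * Walk [cpos, cpos + len) rec by rec, decreasing each covering
 * refcount rec. When "delete" is set and a rec drops from 1 to 0,
 * the underlying clusters are queued for deallocation as well.
 */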
  1889. static int __ocfs2_decrease_refcount(handle_t *handle,
  1890. struct ocfs2_caching_info *ci,
  1891. struct buffer_head *ref_root_bh,
  1892. u64 cpos, u32 len,
  1893. struct ocfs2_alloc_context *meta_ac,
  1894. struct ocfs2_cached_dealloc_ctxt *dealloc,
  1895. int delete)
  1896. {
  1897. int ret = 0, index = 0;
  1898. struct ocfs2_refcount_rec rec;
  1899. unsigned int r_count = 0, r_len;
  1900. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1901. struct buffer_head *ref_leaf_bh = NULL;
  1902. mlog(0, "Tree owner %llu, decrease refcount start %llu, "
  1903. "len %u, delete %u\n",
  1904. (unsigned long long)ocfs2_metadata_cache_owner(ci),
  1905. (unsigned long long)cpos, len, delete);
  1906. while (len) {
  1907. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1908. cpos, len, &rec, &index,
  1909. &ref_leaf_bh);
  1910. if (ret) {
  1911. mlog_errno(ret);
  1912. goto out;
  1913. }
  1914. r_count = le32_to_cpu(rec.r_refcount);
  1915. BUG_ON(r_count == 0);
  1916. if (!delete)
  1917. BUG_ON(r_count > 1);
  1918. r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
  1919. le32_to_cpu(rec.r_clusters)) - cpos;
  1920. ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh,
  1921. ref_leaf_bh, index,
  1922. cpos, r_len,
  1923. meta_ac, dealloc);
  1924. if (ret) {
  1925. mlog_errno(ret);
  1926. goto out;
  1927. }
  1928. if (le32_to_cpu(rec.r_refcount) == 1 && delete) {
  1929. ret = ocfs2_cache_cluster_dealloc(dealloc,
  1930. ocfs2_clusters_to_blocks(sb, cpos),
  1931. r_len);
  1932. if (ret) {
  1933. mlog_errno(ret);
  1934. goto out;
  1935. }
  1936. }
  1937. cpos += r_len;
  1938. len -= r_len;
  1939. brelse(ref_leaf_bh);
  1940. ref_leaf_bh = NULL;
  1941. }
  1942. out:
  1943. brelse(ref_leaf_bh);
  1944. return ret;
  1945. }
  1946. /* Caller must hold refcount tree lock. */
  1947. int ocfs2_decrease_refcount(struct inode *inode,
  1948. handle_t *handle, u32 cpos, u32 len,
  1949. struct ocfs2_alloc_context *meta_ac,
  1950. struct ocfs2_cached_dealloc_ctxt *dealloc,
  1951. int delete)
  1952. {
  1953. int ret;
  1954. u64 ref_blkno;
  1955. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  1956. struct buffer_head *ref_root_bh = NULL;
  1957. struct ocfs2_refcount_tree *tree;
  1958. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  1959. ret = ocfs2_get_refcount_block(inode, &ref_blkno);
  1960. if (ret) {
  1961. mlog_errno(ret);
  1962. goto out;
  1963. }
  1964. ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree);
  1965. if (ret) {
  1966. mlog_errno(ret);
  1967. goto out;
  1968. }
  1969. ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
  1970. &ref_root_bh);
  1971. if (ret) {
  1972. mlog_errno(ret);
  1973. goto out;
  1974. }
  1975. ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh,
  1976. cpos, len, meta_ac, dealloc, delete);
  1977. if (ret)
  1978. mlog_errno(ret);
  1979. out:
  1980. brelse(ref_root_bh);
  1981. return ret;
  1982. }
  1983. /*
  1984. * Mark the already-existing extent at cpos as refcounted for len clusters.
  1985. * This adds the refcount extent flag.
  1986. *
  1987. * If the existing extent is larger than the request, initiate a
  1988. * split. An attempt will be made at merging with adjacent extents.
  1989. *
  1990. * The caller is responsible for passing down meta_ac if we'll need it.
  1991. */
  1992. static int ocfs2_mark_extent_refcounted(struct inode *inode,
  1993. struct ocfs2_extent_tree *et,
  1994. handle_t *handle, u32 cpos,
  1995. u32 len, u32 phys,
  1996. struct ocfs2_alloc_context *meta_ac,
  1997. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1998. {
  1999. int ret;
  2000. mlog(0, "Inode %lu refcount tree cpos %u, len %u, phys cluster %u\n",
  2001. inode->i_ino, cpos, len, phys);
  2002. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
  2003. ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
  2004. "tree, but the feature bit is not set in the "
  2005. "super block.", inode->i_ino);
  2006. ret = -EROFS;
  2007. goto out;
  2008. }
  2009. ret = ocfs2_change_extent_flag(handle, et, cpos,
  2010. len, phys, meta_ac, dealloc,
  2011. OCFS2_EXT_REFCOUNTED, 0);
  2012. if (ret)
  2013. mlog_errno(ret);
  2014. out:
  2015. return ret;
  2016. }
  2017. /*
  2018. * Given some contiguous physical clusters, calculate what we need
  2019. * for modifying their refcount.
  2020. */
  2021. static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
  2022. struct ocfs2_caching_info *ci,
  2023. struct buffer_head *ref_root_bh,
  2024. u64 start_cpos,
  2025. u32 clusters,
  2026. int *meta_add,
  2027. int *credits)
  2028. {
  2029. int ret = 0, index, ref_blocks = 0, recs_add = 0;
  2030. u64 cpos = start_cpos;
  2031. struct ocfs2_refcount_block *rb;
  2032. struct ocfs2_refcount_rec rec;
  2033. struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
  2034. u32 len;
  2035. mlog(0, "start_cpos %llu, clusters %u\n",
  2036. (unsigned long long)start_cpos, clusters);
  2037. while (clusters) {
  2038. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  2039. cpos, clusters, &rec,
  2040. &index, &ref_leaf_bh);
  2041. if (ret) {
  2042. mlog_errno(ret);
  2043. goto out;
  2044. }
  2045. if (ref_leaf_bh != prev_bh) {
  2046. /*
  2047. * Now we encounter a new leaf block, so calculate
  2048. * whether we need to extend the old leaf.
  2049. */
  2050. if (prev_bh) {
  2051. rb = (struct ocfs2_refcount_block *)
  2052. prev_bh->b_data;
if (le16_to_cpu(rb->rf_records.rl_used) +
  2054. recs_add >
  2055. le16_to_cpu(rb->rf_records.rl_count))
  2056. ref_blocks++;
  2057. }
  2058. recs_add = 0;
  2059. *credits += 1;
  2060. brelse(prev_bh);
  2061. prev_bh = ref_leaf_bh;
  2062. get_bh(prev_bh);
  2063. }
  2064. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  2065. mlog(0, "recs_add %d,cpos %llu, clusters %u, rec->r_cpos %llu,"
  2066. "rec->r_clusters %u, rec->r_refcount %u, index %d\n",
  2067. recs_add, (unsigned long long)cpos, clusters,
  2068. (unsigned long long)le64_to_cpu(rec.r_cpos),
  2069. le32_to_cpu(rec.r_clusters),
  2070. le32_to_cpu(rec.r_refcount), index);
  2071. len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
  2072. le32_to_cpu(rec.r_clusters)) - cpos;
/*
 * If the refcount rec already exists, cool. We just need to check
 * whether there is a split; otherwise we just need to increase
 * the refcount.
 * If we will insert new recs, increase recs_add accordingly.
 *
 * We count all the records which will be inserted into the same
 * refcount block, so that we can tell exactly whether we need a
 * new refcount block or not.
 */
  2083. if (rec.r_refcount) {
  2084. /* Check whether we need a split at the beginning. */
  2085. if (cpos == start_cpos &&
  2086. cpos != le64_to_cpu(rec.r_cpos))
  2087. recs_add++;
  2088. /* Check whether we need a split in the end. */
  2089. if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
  2090. le32_to_cpu(rec.r_clusters))
  2091. recs_add++;
  2092. } else
  2093. recs_add++;
  2094. brelse(ref_leaf_bh);
  2095. ref_leaf_bh = NULL;
  2096. clusters -= len;
  2097. cpos += len;
  2098. }
  2099. if (prev_bh) {
  2100. rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
  2102. le16_to_cpu(rb->rf_records.rl_count))
  2103. ref_blocks++;
  2104. *credits += 1;
  2105. }
  2106. if (!ref_blocks)
  2107. goto out;
  2108. mlog(0, "we need ref_blocks %d\n", ref_blocks);
  2109. *meta_add += ref_blocks;
  2110. *credits += ref_blocks;
/*
 * So we may need ref_blocks to insert into the tree.
 * That also means we need to change the b-tree and add that number
 * of records, since we never merge them.
 * We need one more block for expansion since the newly created leaf
 * block is also full and needs a split.
 */
  2118. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  2119. if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
  2120. struct ocfs2_extent_tree et;
  2121. ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
  2122. *meta_add += ocfs2_extend_meta_needed(et.et_root_el);
  2123. *credits += ocfs2_calc_extend_credits(sb,
  2124. et.et_root_el,
  2125. ref_blocks);
  2126. } else {
  2127. *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
  2128. *meta_add += 1;
  2129. }
  2130. out:
  2131. brelse(ref_leaf_bh);
  2132. brelse(prev_bh);
  2133. return ret;
  2134. }
/*
 * For a refcount tree, we will decrease the refcount of some
 * contiguous clusters, so just walk through them to see how many
 * blocks we are going to touch and whether we need to create new
 * blocks.
 *
 * Normally the refcount blocks storing these refcounts should be
 * contiguous as well, so we can get the number easily.
 * We will at most split 2 refcount records and add 2 more refcount
 * blocks, so just check it in a rough way.
 *
 * Caller must hold refcount tree lock.
 */
  2147. int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
  2148. u64 refcount_loc,
  2149. u64 phys_blkno,
  2150. u32 clusters,
  2151. int *credits,
  2152. int *ref_blocks)
  2153. {
  2154. int ret;
  2155. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  2156. struct buffer_head *ref_root_bh = NULL;
  2157. struct ocfs2_refcount_tree *tree;
  2158. u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);
  2159. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
  2160. ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
  2161. "tree, but the feature bit is not set in the "
  2162. "super block.", inode->i_ino);
  2163. ret = -EROFS;
  2164. goto out;
  2165. }
  2166. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  2167. ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
  2168. refcount_loc, &tree);
  2169. if (ret) {
  2170. mlog_errno(ret);
  2171. goto out;
  2172. }
  2173. ret = ocfs2_read_refcount_block(&tree->rf_ci, refcount_loc,
  2174. &ref_root_bh);
  2175. if (ret) {
  2176. mlog_errno(ret);
  2177. goto out;
  2178. }
  2179. ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
  2180. &tree->rf_ci,
  2181. ref_root_bh,
  2182. start_cpos, clusters,
  2183. ref_blocks, credits);
  2184. if (ret) {
  2185. mlog_errno(ret);
  2186. goto out;
  2187. }
  2188. mlog(0, "reserve new metadata %d blocks, credits = %d\n",
  2189. *ref_blocks, *credits);
  2190. out:
  2191. brelse(ref_root_bh);
  2192. return ret;
  2193. }
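/*
 * CoW is done in chunks of MAX_CONTIG_BYTES to keep the resulting
 * extents reasonably contiguous. As an illustrative example (the
 * numbers depend on the cluster size actually in use): with 4KB
 * clusters, ocfs2_cow_contig_clusters() is 256 and
 * ocfs2_cow_contig_mask() is ~255, i.e. extents are broken on 1MB
 * boundaries.
 */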
  2194. #define MAX_CONTIG_BYTES 1048576
  2195. static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb)
  2196. {
  2197. return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES);
  2198. }
  2199. static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb)
  2200. {
  2201. return ~(ocfs2_cow_contig_clusters(sb) - 1);
  2202. }
  2203. /*
  2204. * Given an extent that starts at 'start' and an I/O that starts at 'cpos',
  2205. * find an offset (start + (n * contig_clusters)) that is closest to cpos
  2206. * while still being less than or equal to it.
  2207. *
  2208. * The goal is to break the extent at a multiple of contig_clusters.
  2209. */
  2210. static inline unsigned int ocfs2_cow_align_start(struct super_block *sb,
  2211. unsigned int start,
  2212. unsigned int cpos)
  2213. {
  2214. BUG_ON(start > cpos);
  2215. return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
  2216. }
  2217. /*
  2218. * Given a cluster count of len, pad it out so that it is a multiple
  2219. * of contig_clusters.
  2220. */
  2221. static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
  2222. unsigned int len)
  2223. {
  2224. unsigned int padded =
  2225. (len + (ocfs2_cow_contig_clusters(sb) - 1)) &
  2226. ocfs2_cow_contig_mask(sb);
  2227. /* Did we wrap? */
  2228. if (padded < len)
  2229. padded = UINT_MAX;
  2230. return padded;
  2231. }
/*
 * Calculate the start and number of virtual clusters we need to CoW.
 *
 * cpos is the virtual start cluster position at which we want to do
 * CoW in a file, and write_len is the cluster length.
 * max_cpos is the place where we want to stop CoW intentionally.
 *
 * Normally we will start CoW from the beginning of the extent record
 * containing cpos. We try to break up extents on boundaries of
 * MAX_CONTIG_BYTES so that we get good I/O from the resulting extent
 * tree.
 */
  2243. static int ocfs2_refcount_cal_cow_clusters(struct inode *inode,
  2244. struct ocfs2_extent_list *el,
  2245. u32 cpos,
  2246. u32 write_len,
  2247. u32 max_cpos,
  2248. u32 *cow_start,
  2249. u32 *cow_len)
  2250. {
  2251. int ret = 0;
  2252. int tree_height = le16_to_cpu(el->l_tree_depth), i;
  2253. struct buffer_head *eb_bh = NULL;
  2254. struct ocfs2_extent_block *eb = NULL;
  2255. struct ocfs2_extent_rec *rec;
  2256. unsigned int want_clusters, rec_end = 0;
  2257. int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb);
  2258. int leaf_clusters;
  2259. BUG_ON(cpos + write_len > max_cpos);
  2260. if (tree_height > 0) {
  2261. ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
  2262. if (ret) {
  2263. mlog_errno(ret);
  2264. goto out;
  2265. }
  2266. eb = (struct ocfs2_extent_block *) eb_bh->b_data;
  2267. el = &eb->h_list;
  2268. if (el->l_tree_depth) {
  2269. ocfs2_error(inode->i_sb,
  2270. "Inode %lu has non zero tree depth in "
  2271. "leaf block %llu\n", inode->i_ino,
  2272. (unsigned long long)eb_bh->b_blocknr);
  2273. ret = -EROFS;
  2274. goto out;
  2275. }
  2276. }
  2277. *cow_len = 0;
  2278. for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
  2279. rec = &el->l_recs[i];
  2280. if (ocfs2_is_empty_extent(rec)) {
  2281. mlog_bug_on_msg(i != 0, "Inode %lu has empty record in "
  2282. "index %d\n", inode->i_ino, i);
  2283. continue;
  2284. }
  2285. if (le32_to_cpu(rec->e_cpos) +
  2286. le16_to_cpu(rec->e_leaf_clusters) <= cpos)
  2287. continue;
  2288. if (*cow_len == 0) {
  2289. /*
  2290. * We should find a refcounted record in the
  2291. * first pass.
  2292. */
  2293. BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED));
  2294. *cow_start = le32_to_cpu(rec->e_cpos);
  2295. }
  2296. /*
  2297. * If we encounter a hole, a non-refcounted record or
  2298. * pass the max_cpos, stop the search.
  2299. */
  2300. if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) ||
  2301. (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) ||
  2302. (max_cpos <= le32_to_cpu(rec->e_cpos)))
  2303. break;
  2304. leaf_clusters = le16_to_cpu(rec->e_leaf_clusters);
  2305. rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters;
  2306. if (rec_end > max_cpos) {
  2307. rec_end = max_cpos;
  2308. leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos);
  2309. }
  2310. /*
  2311. * How many clusters do we actually need from
  2312. * this extent? First we see how many we actually
  2313. * need to complete the write. If that's smaller
  2314. * than contig_clusters, we try for contig_clusters.
  2315. */
  2316. if (!*cow_len)
  2317. want_clusters = write_len;
  2318. else
  2319. want_clusters = (cpos + write_len) -
  2320. (*cow_start + *cow_len);
  2321. if (want_clusters < contig_clusters)
  2322. want_clusters = contig_clusters;
  2323. /*
  2324. * If the write does not cover the whole extent, we
  2325. * need to calculate how we're going to split the extent.
  2326. * We try to do it on contig_clusters boundaries.
  2327. *
  2328. * Any extent smaller than contig_clusters will be
  2329. * CoWed in its entirety.
  2330. */
  2331. if (leaf_clusters <= contig_clusters)
  2332. *cow_len += leaf_clusters;
  2333. else if (*cow_len || (*cow_start == cpos)) {
  2334. /*
  2335. * This extent needs to be CoW'd from its
  2336. * beginning, so all we have to do is compute
  2337. * how many clusters to grab. We align
  2338. * want_clusters to the edge of contig_clusters
  2339. * to get better I/O.
  2340. */
  2341. want_clusters = ocfs2_cow_align_length(inode->i_sb,
  2342. want_clusters);
  2343. if (leaf_clusters < want_clusters)
  2344. *cow_len += leaf_clusters;
  2345. else
  2346. *cow_len += want_clusters;
  2347. } else if ((*cow_start + contig_clusters) >=
  2348. (cpos + write_len)) {
  2349. /*
  2350. * Breaking off contig_clusters at the front
  2351. * of the extent will cover our write. That's
  2352. * easy.
  2353. */
  2354. *cow_len = contig_clusters;
  2355. } else if ((rec_end - cpos) <= contig_clusters) {
  2356. /*
  2357. * Breaking off contig_clusters at the tail of
  2358. * this extent will cover cpos.
  2359. */
  2360. *cow_start = rec_end - contig_clusters;
  2361. *cow_len = contig_clusters;
  2362. } else if ((rec_end - cpos) <= want_clusters) {
  2363. /*
  2364. * While we can't fit the entire write in this
  2365. * extent, we know that the write goes from cpos
  2366. * to the end of the extent. Break that off.
  2367. * We try to break it at some multiple of
  2368. * contig_clusters from the front of the extent.
  2369. * Failing that (ie, cpos is within
  2370. * contig_clusters of the front), we'll CoW the
  2371. * entire extent.
  2372. */
  2373. *cow_start = ocfs2_cow_align_start(inode->i_sb,
  2374. *cow_start, cpos);
  2375. *cow_len = rec_end - *cow_start;
  2376. } else {
  2377. /*
  2378. * Ok, the entire write lives in the middle of
  2379. * this extent. Let's try to slice the extent up
  2380. * nicely. Optimally, our CoW region starts at
  2381. * m*contig_clusters from the beginning of the
  2382. * extent and goes for n*contig_clusters,
  2383. * covering the entire write.
  2384. */
  2385. *cow_start = ocfs2_cow_align_start(inode->i_sb,
  2386. *cow_start, cpos);
  2387. want_clusters = (cpos + write_len) - *cow_start;
  2388. want_clusters = ocfs2_cow_align_length(inode->i_sb,
  2389. want_clusters);
  2390. if (*cow_start + want_clusters <= rec_end)
  2391. *cow_len = want_clusters;
  2392. else
  2393. *cow_len = rec_end - *cow_start;
  2394. }
  2395. /* Have we covered our entire write yet? */
  2396. if ((*cow_start + *cow_len) >= (cpos + write_len))
  2397. break;
  2398. /*
  2399. * If we reach the end of the extent block and don't get enough
  2400. * clusters, continue with the next extent block if possible.
  2401. */
  2402. if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
  2403. eb && eb->h_next_leaf_blk) {
  2404. brelse(eb_bh);
  2405. eb_bh = NULL;
  2406. ret = ocfs2_read_extent_block(INODE_CACHE(inode),
  2407. le64_to_cpu(eb->h_next_leaf_blk),
  2408. &eb_bh);
  2409. if (ret) {
  2410. mlog_errno(ret);
  2411. goto out;
  2412. }
  2413. eb = (struct ocfs2_extent_block *) eb_bh->b_data;
  2414. el = &eb->h_list;
  2415. i = -1;
  2416. }
  2417. }
  2418. out:
  2419. brelse(eb_bh);
  2420. return ret;
  2421. }
/*
 * Prepare meta_ac, data_ac and calculate credits when we want to add
 * num_clusters clusters to the data tree "et" and change the refcount
 * of the old clusters (starting from p_cluster) in the refcount tree.
 *
 * Note:
 * 1. Since we may split the old tree, we will need at most
 *    num_clusters + 2 new leaf records.
 * 2. In some cases we may not need to reserve new clusters
 *    (e.g. reflink), so just pass data_ac = NULL.
 */
  2433. static int ocfs2_lock_refcount_allocators(struct super_block *sb,
  2434. u32 p_cluster, u32 num_clusters,
  2435. struct ocfs2_extent_tree *et,
  2436. struct ocfs2_caching_info *ref_ci,
  2437. struct buffer_head *ref_root_bh,
  2438. struct ocfs2_alloc_context **meta_ac,
  2439. struct ocfs2_alloc_context **data_ac,
  2440. int *credits)
  2441. {
  2442. int ret = 0, meta_add = 0;
  2443. int num_free_extents = ocfs2_num_free_extents(OCFS2_SB(sb), et);
  2444. if (num_free_extents < 0) {
  2445. ret = num_free_extents;
  2446. mlog_errno(ret);
  2447. goto out;
  2448. }
  2449. if (num_free_extents < num_clusters + 2)
  2450. meta_add =
  2451. ocfs2_extend_meta_needed(et->et_root_el);
  2452. *credits += ocfs2_calc_extend_credits(sb, et->et_root_el,
  2453. num_clusters + 2);
  2454. ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
  2455. p_cluster, num_clusters,
  2456. &meta_add, credits);
  2457. if (ret) {
  2458. mlog_errno(ret);
  2459. goto out;
  2460. }
  2461. mlog(0, "reserve new metadata %d, clusters %u, credits = %d\n",
  2462. meta_add, num_clusters, *credits);
  2463. ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
  2464. meta_ac);
  2465. if (ret) {
  2466. mlog_errno(ret);
  2467. goto out;
  2468. }
  2469. if (data_ac) {
  2470. ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
  2471. data_ac);
  2472. if (ret)
  2473. mlog_errno(ret);
  2474. }
  2475. out:
  2476. if (ret) {
  2477. if (*meta_ac) {
  2478. ocfs2_free_alloc_context(*meta_ac);
  2479. *meta_ac = NULL;
  2480. }
  2481. }
  2482. return ret;
  2483. }
  2484. static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
  2485. {
  2486. BUG_ON(buffer_dirty(bh));
  2487. clear_buffer_mapped(bh);
  2488. return 0;
  2489. }
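/*
 * Copy old_cluster's data to new_cluster through the page cache:
 * read each page covering the range, clear the buffer mappings and
 * then map and dirty the pages against the new physical blocks.
 */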
  2490. static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
  2491. struct ocfs2_cow_context *context,
  2492. u32 cpos, u32 old_cluster,
  2493. u32 new_cluster, u32 new_len)
  2494. {
  2495. int ret = 0, partial;
  2496. struct ocfs2_caching_info *ci = context->data_et.et_ci;
  2497. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  2498. u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
  2499. struct page *page;
  2500. pgoff_t page_index;
  2501. unsigned int from, to;
  2502. loff_t offset, end, map_end;
  2503. struct address_space *mapping = context->inode->i_mapping;
  2504. mlog(0, "old_cluster %u, new %u, len %u at offset %u\n", old_cluster,
  2505. new_cluster, new_len, cpos);
  2506. offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
  2507. end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
  2508. while (offset < end) {
  2509. page_index = offset >> PAGE_CACHE_SHIFT;
  2510. map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
  2511. if (map_end > end)
  2512. map_end = end;
  2513. /* from, to is the offset within the page. */
  2514. from = offset & (PAGE_CACHE_SIZE - 1);
  2515. to = PAGE_CACHE_SIZE;
  2516. if (map_end & (PAGE_CACHE_SIZE - 1))
  2517. to = map_end & (PAGE_CACHE_SIZE - 1);
  2518. page = grab_cache_page(mapping, page_index);
/*
 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, this page
 * can't be dirtied before we CoW it out.
 */
  2523. if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
  2524. BUG_ON(PageDirty(page));
  2525. if (!PageUptodate(page)) {
  2526. ret = block_read_full_page(page, ocfs2_get_block);
  2527. if (ret) {
  2528. mlog_errno(ret);
  2529. goto unlock;
  2530. }
  2531. lock_page(page);
  2532. }
  2533. if (page_has_buffers(page)) {
  2534. ret = walk_page_buffers(handle, page_buffers(page),
  2535. from, to, &partial,
  2536. ocfs2_clear_cow_buffer);
  2537. if (ret) {
  2538. mlog_errno(ret);
  2539. goto unlock;
  2540. }
  2541. }
  2542. ocfs2_map_and_dirty_page(context->inode,
  2543. handle, from, to,
  2544. page, 0, &new_block);
  2545. mark_page_accessed(page);
  2546. unlock:
  2547. unlock_page(page);
  2548. page_cache_release(page);
  2549. page = NULL;
  2550. offset = map_end;
  2551. if (ret)
  2552. break;
  2553. }
  2554. return ret;
  2555. }
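/*
 * Copy old_cluster's data to new_cluster block by block, journalling
 * each new block: the variant used when the data is written through
 * the journal rather than through the page cache.
 */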
  2556. static int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
  2557. struct ocfs2_cow_context *context,
  2558. u32 cpos, u32 old_cluster,
  2559. u32 new_cluster, u32 new_len)
  2560. {
  2561. int ret = 0;
  2562. struct super_block *sb = context->inode->i_sb;
  2563. struct ocfs2_caching_info *ci = context->data_et.et_ci;
  2564. int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
  2565. u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster);
  2566. u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
  2567. struct ocfs2_super *osb = OCFS2_SB(sb);
  2568. struct buffer_head *old_bh = NULL;
  2569. struct buffer_head *new_bh = NULL;
  2570. mlog(0, "old_cluster %u, new %u, len %u\n", old_cluster,
  2571. new_cluster, new_len);
  2572. for (i = 0; i < blocks; i++, old_block++, new_block++) {
  2573. new_bh = sb_getblk(osb->sb, new_block);
  2574. if (new_bh == NULL) {
  2575. ret = -EIO;
  2576. mlog_errno(ret);
  2577. break;
  2578. }
  2579. ocfs2_set_new_buffer_uptodate(ci, new_bh);
  2580. ret = ocfs2_read_block(ci, old_block, &old_bh, NULL);
  2581. if (ret) {
  2582. mlog_errno(ret);
  2583. break;
  2584. }
  2585. ret = ocfs2_journal_access(handle, ci, new_bh,
  2586. OCFS2_JOURNAL_ACCESS_CREATE);
  2587. if (ret) {
  2588. mlog_errno(ret);
  2589. break;
  2590. }
  2591. memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize);
  2592. ocfs2_journal_dirty(handle, new_bh);
  2593. brelse(new_bh);
  2594. brelse(old_bh);
  2595. new_bh = NULL;
  2596. old_bh = NULL;
  2597. }
  2598. brelse(new_bh);
  2599. brelse(old_bh);
  2600. return ret;
  2601. }
  2602. static int ocfs2_clear_ext_refcount(handle_t *handle,
  2603. struct ocfs2_extent_tree *et,
  2604. u32 cpos, u32 p_cluster, u32 len,
  2605. unsigned int ext_flags,
  2606. struct ocfs2_alloc_context *meta_ac,
  2607. struct ocfs2_cached_dealloc_ctxt *dealloc)
  2608. {
  2609. int ret, index;
  2610. struct ocfs2_extent_rec replace_rec;
  2611. struct ocfs2_path *path = NULL;
  2612. struct ocfs2_extent_list *el;
  2613. struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
  2614. u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
  2615. mlog(0, "inode %llu cpos %u, len %u, p_cluster %u, ext_flags %u\n",
  2616. (unsigned long long)ino, cpos, len, p_cluster, ext_flags);
  2617. memset(&replace_rec, 0, sizeof(replace_rec));
  2618. replace_rec.e_cpos = cpu_to_le32(cpos);
  2619. replace_rec.e_leaf_clusters = cpu_to_le16(len);
  2620. replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb,
  2621. p_cluster));
  2622. replace_rec.e_flags = ext_flags;
  2623. replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED;
  2624. path = ocfs2_new_path_from_et(et);
  2625. if (!path) {
  2626. ret = -ENOMEM;
  2627. mlog_errno(ret);
  2628. goto out;
  2629. }
  2630. ret = ocfs2_find_path(et->et_ci, path, cpos);
  2631. if (ret) {
  2632. mlog_errno(ret);
  2633. goto out;
  2634. }
  2635. el = path_leaf_el(path);
  2636. index = ocfs2_search_extent_list(el, cpos);
  2637. if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
  2638. ocfs2_error(sb,
  2639. "Inode %llu has an extent at cpos %u which can no "
  2640. "longer be found.\n",
  2641. (unsigned long long)ino, cpos);
  2642. ret = -EROFS;
  2643. goto out;
  2644. }
  2645. ret = ocfs2_split_extent(handle, et, path, index,
  2646. &replace_rec, meta_ac, dealloc);
  2647. if (ret)
  2648. mlog_errno(ret);
  2649. out:
  2650. ocfs2_free_path(path);
  2651. return ret;
  2652. }
  2653. static int ocfs2_replace_clusters(handle_t *handle,
  2654. struct ocfs2_cow_context *context,
  2655. u32 cpos, u32 old,
  2656. u32 new, u32 len,
  2657. unsigned int ext_flags)
  2658. {
  2659. int ret;
  2660. struct ocfs2_caching_info *ci = context->data_et.et_ci;
  2661. u64 ino = ocfs2_metadata_cache_owner(ci);
  2662. mlog(0, "inode %llu, cpos %u, old %u, new %u, len %u, ext_flags %u\n",
  2663. (unsigned long long)ino, cpos, old, new, len, ext_flags);
/* If the old clusters are unwritten, there is no need to duplicate. */
  2665. if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
  2666. ret = context->cow_duplicate_clusters(handle, context, cpos,
  2667. old, new, len);
  2668. if (ret) {
  2669. mlog_errno(ret);
  2670. goto out;
  2671. }
  2672. }
  2673. ret = ocfs2_clear_ext_refcount(handle, &context->data_et,
  2674. cpos, new, len, ext_flags,
  2675. context->meta_ac, &context->dealloc);
  2676. if (ret)
  2677. mlog_errno(ret);
  2678. out:
  2679. return ret;
  2680. }
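/*
 * After CoW, write back the affected pages (unless the inode orders
 * data with the journal, in which case nothing needs to be done) and
 * wait for the writeback to finish so the new blocks hold the data
 * on disk.
 */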
  2681. static int ocfs2_cow_sync_writeback(struct super_block *sb,
  2682. struct ocfs2_cow_context *context,
  2683. u32 cpos, u32 num_clusters)
  2684. {
  2685. int ret = 0;
  2686. loff_t offset, end, map_end;
  2687. pgoff_t page_index;
  2688. struct page *page;
  2689. if (ocfs2_should_order_data(context->inode))
  2690. return 0;
  2691. offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
  2692. end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);
  2693. ret = filemap_fdatawrite_range(context->inode->i_mapping,
  2694. offset, end - 1);
  2695. if (ret < 0) {
  2696. mlog_errno(ret);
  2697. return ret;
  2698. }
  2699. while (offset < end) {
  2700. page_index = offset >> PAGE_CACHE_SHIFT;
  2701. map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
  2702. if (map_end > end)
  2703. map_end = end;
  2704. page = grab_cache_page(context->inode->i_mapping, page_index);
  2705. BUG_ON(!page);
  2706. wait_on_page_writeback(page);
  2707. if (PageError(page)) {
  2708. ret = -EIO;
  2709. mlog_errno(ret);
  2710. } else
  2711. mark_page_accessed(page);
  2712. unlock_page(page);
  2713. page_cache_release(page);
  2714. page = NULL;
  2715. offset = map_end;
  2716. if (ret)
  2717. break;
  2718. }
  2719. return ret;
  2720. }
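/* Thin wrapper so the CoW context can look up extents in the dinode tree. */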
  2721. static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context,
  2722. u32 v_cluster, u32 *p_cluster,
  2723. u32 *num_clusters,
  2724. unsigned int *extent_flags)
  2725. {
  2726. return ocfs2_get_clusters(context->inode, v_cluster, p_cluster,
  2727. num_clusters, extent_flags);
  2728. }
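/*
 * The core of the CoW machinery: for one refcounted extent
 * [cpos, cpos + num_clusters), reserve the allocators and journal
 * credits, then walk the matching refcount records.  Records with a
 * refcount of 1 simply get the refcount flag cleared; shared records get
 * fresh clusters claimed, the data duplicated, and the old refcount
 * decreased.  Any post_refcount hook runs in the same transaction, and
 * for regular inode data the new pages are written back afterwards.
 */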
  2729. static int ocfs2_make_clusters_writable(struct super_block *sb,
  2730. struct ocfs2_cow_context *context,
  2731. u32 cpos, u32 p_cluster,
  2732. u32 num_clusters, unsigned int e_flags)
  2733. {
  2734. int ret, delete, index, credits = 0;
  2735. u32 new_bit, new_len;
  2736. unsigned int set_len;
  2737. struct ocfs2_super *osb = OCFS2_SB(sb);
  2738. handle_t *handle;
  2739. struct buffer_head *ref_leaf_bh = NULL;
  2740. struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
  2741. struct ocfs2_refcount_rec rec;
  2742. mlog(0, "cpos %u, p_cluster %u, num_clusters %u, e_flags %u\n",
  2743. cpos, p_cluster, num_clusters, e_flags);
  2744. ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
  2745. &context->data_et,
  2746. ref_ci,
  2747. context->ref_root_bh,
  2748. &context->meta_ac,
  2749. &context->data_ac, &credits);
  2750. if (ret) {
  2751. mlog_errno(ret);
  2752. return ret;
  2753. }
  2754. if (context->post_refcount)
  2755. credits += context->post_refcount->credits;
  2756. credits += context->extra_credits;
  2757. handle = ocfs2_start_trans(osb, credits);
  2758. if (IS_ERR(handle)) {
  2759. ret = PTR_ERR(handle);
  2760. mlog_errno(ret);
  2761. goto out;
  2762. }
  2763. while (num_clusters) {
  2764. ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
  2765. p_cluster, num_clusters,
  2766. &rec, &index, &ref_leaf_bh);
  2767. if (ret) {
  2768. mlog_errno(ret);
  2769. goto out_commit;
  2770. }
  2771. BUG_ON(!rec.r_refcount);
  2772. set_len = min((u64)p_cluster + num_clusters,
  2773. le64_to_cpu(rec.r_cpos) +
  2774. le32_to_cpu(rec.r_clusters)) - p_cluster;
  2775. /*
2776. * There are several different situations here.
2777. * 1. If refcount == 1, just remove the flag and don't COW.
2778. * 2. If refcount > 1, allocate new clusters.
2779. * We may not be able to handle the whole record in one pass, so
2780. * continue until num_clusters has been consumed.
  2781. */
  2782. if (le32_to_cpu(rec.r_refcount) == 1) {
  2783. delete = 0;
  2784. ret = ocfs2_clear_ext_refcount(handle,
  2785. &context->data_et,
  2786. cpos, p_cluster,
  2787. set_len, e_flags,
  2788. context->meta_ac,
  2789. &context->dealloc);
  2790. if (ret) {
  2791. mlog_errno(ret);
  2792. goto out_commit;
  2793. }
  2794. } else {
  2795. delete = 1;
  2796. ret = __ocfs2_claim_clusters(osb, handle,
  2797. context->data_ac,
  2798. 1, set_len,
  2799. &new_bit, &new_len);
  2800. if (ret) {
  2801. mlog_errno(ret);
  2802. goto out_commit;
  2803. }
  2804. ret = ocfs2_replace_clusters(handle, context,
  2805. cpos, p_cluster, new_bit,
  2806. new_len, e_flags);
  2807. if (ret) {
  2808. mlog_errno(ret);
  2809. goto out_commit;
  2810. }
  2811. set_len = new_len;
  2812. }
  2813. ret = __ocfs2_decrease_refcount(handle, ref_ci,
  2814. context->ref_root_bh,
  2815. p_cluster, set_len,
  2816. context->meta_ac,
  2817. &context->dealloc, delete);
  2818. if (ret) {
  2819. mlog_errno(ret);
  2820. goto out_commit;
  2821. }
  2822. cpos += set_len;
  2823. p_cluster += set_len;
  2824. num_clusters -= set_len;
  2825. brelse(ref_leaf_bh);
  2826. ref_leaf_bh = NULL;
  2827. }
  2828. /* handle any post_cow action. */
  2829. if (context->post_refcount && context->post_refcount->func) {
  2830. ret = context->post_refcount->func(context->inode, handle,
  2831. context->post_refcount->para);
  2832. if (ret) {
  2833. mlog_errno(ret);
  2834. goto out_commit;
  2835. }
  2836. }
  2837. /*
2838. * Write the new pages out first here if we are in
2839. * write-back mode.
  2840. */
  2841. if (context->get_clusters == ocfs2_di_get_clusters) {
  2842. ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
  2843. if (ret)
  2844. mlog_errno(ret);
  2845. }
  2846. out_commit:
  2847. ocfs2_commit_trans(osb, handle);
  2848. out:
  2849. if (context->data_ac) {
  2850. ocfs2_free_alloc_context(context->data_ac);
  2851. context->data_ac = NULL;
  2852. }
  2853. if (context->meta_ac) {
  2854. ocfs2_free_alloc_context(context->meta_ac);
  2855. context->meta_ac = NULL;
  2856. }
  2857. brelse(ref_leaf_bh);
  2858. return ret;
  2859. }
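/*
 * Drive the CoW of the whole [cow_start, cow_start + cow_len) range:
 * repeatedly look up the next extent through context->get_clusters and
 * hand it to ocfs2_make_clusters_writable(), then flush any clusters
 * queued for deallocation.
 */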
  2860. static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
  2861. {
  2862. int ret = 0;
  2863. struct inode *inode = context->inode;
  2864. u32 cow_start = context->cow_start, cow_len = context->cow_len;
  2865. u32 p_cluster, num_clusters;
  2866. unsigned int ext_flags;
  2867. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  2868. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2869. ocfs2_error(inode->i_sb, "Inode %lu wants to use a refcount "
  2870. "tree, but the feature bit is not set in the "
  2871. "super block.", inode->i_ino);
  2872. return -EROFS;
  2873. }
  2874. ocfs2_init_dealloc_ctxt(&context->dealloc);
  2875. while (cow_len) {
  2876. ret = context->get_clusters(context, cow_start, &p_cluster,
  2877. &num_clusters, &ext_flags);
  2878. if (ret) {
  2879. mlog_errno(ret);
  2880. break;
  2881. }
  2882. BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED));
  2883. if (cow_len < num_clusters)
  2884. num_clusters = cow_len;
  2885. ret = ocfs2_make_clusters_writable(inode->i_sb, context,
  2886. cow_start, p_cluster,
  2887. num_clusters, ext_flags);
  2888. if (ret) {
  2889. mlog_errno(ret);
  2890. break;
  2891. }
  2892. cow_len -= num_clusters;
  2893. cow_start += num_clusters;
  2894. }
  2895. if (ocfs2_dealloc_has_cluster(&context->dealloc)) {
  2896. ocfs2_schedule_truncate_log_flush(osb, 1);
  2897. ocfs2_run_deallocs(osb, &context->dealloc);
  2898. }
  2899. return ret;
  2900. }
  2901. /*
  2902. * Starting at cpos, try to CoW write_len clusters. Don't CoW
  2903. * past max_cpos. This will stop when it runs into a hole or an
  2904. * unrefcounted extent.
  2905. */
  2906. static int ocfs2_refcount_cow_hunk(struct inode *inode,
  2907. struct buffer_head *di_bh,
  2908. u32 cpos, u32 write_len, u32 max_cpos)
  2909. {
  2910. int ret;
  2911. u32 cow_start = 0, cow_len = 0;
  2912. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  2913. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  2914. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  2915. struct buffer_head *ref_root_bh = NULL;
  2916. struct ocfs2_refcount_tree *ref_tree;
  2917. struct ocfs2_cow_context *context = NULL;
  2918. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  2919. ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list,
  2920. cpos, write_len, max_cpos,
  2921. &cow_start, &cow_len);
  2922. if (ret) {
  2923. mlog_errno(ret);
  2924. goto out;
  2925. }
  2926. mlog(0, "CoW inode %lu, cpos %u, write_len %u, cow_start %u, "
  2927. "cow_len %u\n", inode->i_ino,
  2928. cpos, write_len, cow_start, cow_len);
  2929. BUG_ON(cow_len == 0);
  2930. context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
  2931. if (!context) {
  2932. ret = -ENOMEM;
  2933. mlog_errno(ret);
  2934. goto out;
  2935. }
  2936. ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
  2937. 1, &ref_tree, &ref_root_bh);
  2938. if (ret) {
  2939. mlog_errno(ret);
  2940. goto out;
  2941. }
  2942. context->inode = inode;
  2943. context->cow_start = cow_start;
  2944. context->cow_len = cow_len;
  2945. context->ref_tree = ref_tree;
  2946. context->ref_root_bh = ref_root_bh;
  2947. context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
  2948. context->get_clusters = ocfs2_di_get_clusters;
  2949. ocfs2_init_dinode_extent_tree(&context->data_et,
  2950. INODE_CACHE(inode), di_bh);
  2951. ret = ocfs2_replace_cow(context);
  2952. if (ret)
  2953. mlog_errno(ret);
  2954. /*
2955. * Truncate the extent map here: whether or not we hit an error
2956. * during the operation, the cached extent map can no longer be
2957. * trusted.
  2958. */
  2959. ocfs2_extent_map_trunc(inode, cow_start);
  2960. ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
  2961. brelse(ref_root_bh);
  2962. out:
  2963. kfree(context);
  2964. return ret;
  2965. }
  2966. /*
  2967. * CoW any and all clusters between cpos and cpos+write_len.
  2968. * Don't CoW past max_cpos. If this returns successfully, all
  2969. * clusters between cpos and cpos+write_len are safe to modify.
  2970. */
  2971. int ocfs2_refcount_cow(struct inode *inode,
  2972. struct buffer_head *di_bh,
  2973. u32 cpos, u32 write_len, u32 max_cpos)
  2974. {
  2975. int ret = 0;
  2976. u32 p_cluster, num_clusters;
  2977. unsigned int ext_flags;
  2978. while (write_len) {
  2979. ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
  2980. &num_clusters, &ext_flags);
  2981. if (ret) {
  2982. mlog_errno(ret);
  2983. break;
  2984. }
  2985. if (write_len < num_clusters)
  2986. num_clusters = write_len;
  2987. if (ext_flags & OCFS2_EXT_REFCOUNTED) {
  2988. ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
  2989. num_clusters, max_cpos);
  2990. if (ret) {
  2991. mlog_errno(ret);
  2992. break;
  2993. }
  2994. }
  2995. write_len -= num_clusters;
  2996. cpos += num_clusters;
  2997. }
  2998. return ret;
  2999. }
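/*
 * Usage sketch (illustrative only, not taken from this file): a write
 * path would typically break cluster sharing before dirtying data, e.g.
 *
 *	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)
 *		ret = ocfs2_refcount_cow(inode, di_bh, cpos,
 *					 clusters_to_write, UINT_MAX);
 *
 * The di_bh, cpos and clusters_to_write names above are placeholders.
 */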
  3000. static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context,
  3001. u32 v_cluster, u32 *p_cluster,
  3002. u32 *num_clusters,
  3003. unsigned int *extent_flags)
  3004. {
  3005. struct inode *inode = context->inode;
  3006. struct ocfs2_xattr_value_root *xv = context->cow_object;
  3007. return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster,
  3008. num_clusters, &xv->xr_list,
  3009. extent_flags);
  3010. }
  3011. /*
  3012. * Given a xattr value root, calculate the most meta/credits we need for
  3013. * refcount tree change if we truncate it to 0.
  3014. */
  3015. int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
  3016. struct ocfs2_caching_info *ref_ci,
  3017. struct buffer_head *ref_root_bh,
  3018. struct ocfs2_xattr_value_root *xv,
  3019. int *meta_add, int *credits)
  3020. {
  3021. int ret = 0, index, ref_blocks = 0;
  3022. u32 p_cluster, num_clusters;
  3023. u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters);
  3024. struct ocfs2_refcount_block *rb;
  3025. struct ocfs2_refcount_rec rec;
  3026. struct buffer_head *ref_leaf_bh = NULL;
  3027. while (cpos < clusters) {
  3028. ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
  3029. &num_clusters, &xv->xr_list,
  3030. NULL);
  3031. if (ret) {
  3032. mlog_errno(ret);
  3033. goto out;
  3034. }
  3035. cpos += num_clusters;
  3036. while (num_clusters) {
  3037. ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh,
  3038. p_cluster, num_clusters,
  3039. &rec, &index,
  3040. &ref_leaf_bh);
  3041. if (ret) {
  3042. mlog_errno(ret);
  3043. goto out;
  3044. }
  3045. BUG_ON(!rec.r_refcount);
  3046. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  3047. /*
3048. * We don't really know whether the other clusters are in
3049. * this refcount block or not, so just assume the worst
3050. * case: all the clusters are in this block and each one
3051. * will split a refcount rec, so in total we need
3052. * clusters * 2 new refcount recs.
  3053. */
3054. if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
  3055. le16_to_cpu(rb->rf_records.rl_count))
  3056. ref_blocks++;
  3057. *credits += 1;
  3058. brelse(ref_leaf_bh);
  3059. ref_leaf_bh = NULL;
  3060. if (num_clusters <= le32_to_cpu(rec.r_clusters))
  3061. break;
  3062. else
  3063. num_clusters -= le32_to_cpu(rec.r_clusters);
  3064. p_cluster += num_clusters;
  3065. }
  3066. }
  3067. *meta_add += ref_blocks;
  3068. if (!ref_blocks)
  3069. goto out;
  3070. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  3071. if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
  3072. *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
  3073. else {
  3074. struct ocfs2_extent_tree et;
  3075. ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh);
  3076. *credits += ocfs2_calc_extend_credits(inode->i_sb,
  3077. et.et_root_el,
  3078. ref_blocks);
  3079. }
  3080. out:
  3081. brelse(ref_leaf_bh);
  3082. return ret;
  3083. }
  3084. /*
  3085. * Do CoW for xattr.
  3086. */
  3087. int ocfs2_refcount_cow_xattr(struct inode *inode,
  3088. struct ocfs2_dinode *di,
  3089. struct ocfs2_xattr_value_buf *vb,
  3090. struct ocfs2_refcount_tree *ref_tree,
  3091. struct buffer_head *ref_root_bh,
  3092. u32 cpos, u32 write_len,
  3093. struct ocfs2_post_refcount *post)
  3094. {
  3095. int ret;
  3096. struct ocfs2_xattr_value_root *xv = vb->vb_xv;
  3097. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  3098. struct ocfs2_cow_context *context = NULL;
  3099. u32 cow_start, cow_len;
  3100. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  3101. ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list,
  3102. cpos, write_len, UINT_MAX,
  3103. &cow_start, &cow_len);
  3104. if (ret) {
  3105. mlog_errno(ret);
  3106. goto out;
  3107. }
  3108. BUG_ON(cow_len == 0);
  3109. context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
  3110. if (!context) {
  3111. ret = -ENOMEM;
  3112. mlog_errno(ret);
  3113. goto out;
  3114. }
  3115. context->inode = inode;
  3116. context->cow_start = cow_start;
  3117. context->cow_len = cow_len;
  3118. context->ref_tree = ref_tree;
3119. context->ref_root_bh = ref_root_bh;
  3120. context->cow_object = xv;
  3121. context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd;
  3122. /* We need the extra credits for duplicate_clusters by jbd. */
  3123. context->extra_credits =
  3124. ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len;
  3125. context->get_clusters = ocfs2_xattr_value_get_clusters;
  3126. context->post_refcount = post;
  3127. ocfs2_init_xattr_value_extent_tree(&context->data_et,
  3128. INODE_CACHE(inode), vb);
  3129. ret = ocfs2_replace_cow(context);
  3130. if (ret)
  3131. mlog_errno(ret);
  3132. out:
  3133. kfree(context);
  3134. return ret;
  3135. }
  3136. /*
3137. * Insert a new extent into the refcount tree and mark an extent rec
3138. * as refcounted in the dinode tree.
  3139. */
  3140. int ocfs2_add_refcount_flag(struct inode *inode,
  3141. struct ocfs2_extent_tree *data_et,
  3142. struct ocfs2_caching_info *ref_ci,
  3143. struct buffer_head *ref_root_bh,
  3144. u32 cpos, u32 p_cluster, u32 num_clusters,
  3145. struct ocfs2_cached_dealloc_ctxt *dealloc,
  3146. struct ocfs2_post_refcount *post)
  3147. {
  3148. int ret;
  3149. handle_t *handle;
  3150. int credits = 1, ref_blocks = 0;
  3151. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  3152. struct ocfs2_alloc_context *meta_ac = NULL;
  3153. ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
  3154. ref_ci, ref_root_bh,
  3155. p_cluster, num_clusters,
  3156. &ref_blocks, &credits);
  3157. if (ret) {
  3158. mlog_errno(ret);
  3159. goto out;
  3160. }
  3161. mlog(0, "reserve new metadata %d, credits = %d\n",
  3162. ref_blocks, credits);
  3163. if (ref_blocks) {
  3164. ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
  3165. ref_blocks, &meta_ac);
  3166. if (ret) {
  3167. mlog_errno(ret);
  3168. goto out;
  3169. }
  3170. }
  3171. if (post)
  3172. credits += post->credits;
  3173. handle = ocfs2_start_trans(osb, credits);
  3174. if (IS_ERR(handle)) {
  3175. ret = PTR_ERR(handle);
  3176. mlog_errno(ret);
  3177. goto out;
  3178. }
  3179. ret = ocfs2_mark_extent_refcounted(inode, data_et, handle,
  3180. cpos, num_clusters, p_cluster,
  3181. meta_ac, dealloc);
  3182. if (ret) {
  3183. mlog_errno(ret);
  3184. goto out_commit;
  3185. }
  3186. ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
  3187. p_cluster, num_clusters, 0,
  3188. meta_ac, dealloc);
  3189. if (ret) {
  3190. mlog_errno(ret);
  3191. goto out_commit;
  3192. }
  3193. if (post && post->func) {
  3194. ret = post->func(inode, handle, post->para);
  3195. if (ret)
  3196. mlog_errno(ret);
  3197. }
  3198. out_commit:
  3199. ocfs2_commit_trans(osb, handle);
  3200. out:
  3201. if (meta_ac)
  3202. ocfs2_free_alloc_context(meta_ac);
  3203. return ret;
  3204. }
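/* Bump only the ctime, on disk and in memory, inside a small transaction. */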
  3205. static int ocfs2_change_ctime(struct inode *inode,
  3206. struct buffer_head *di_bh)
  3207. {
  3208. int ret;
  3209. handle_t *handle;
  3210. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  3211. handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
  3212. OCFS2_INODE_UPDATE_CREDITS);
  3213. if (IS_ERR(handle)) {
  3214. ret = PTR_ERR(handle);
  3215. mlog_errno(ret);
  3216. goto out;
  3217. }
  3218. ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
  3219. OCFS2_JOURNAL_ACCESS_WRITE);
  3220. if (ret) {
  3221. mlog_errno(ret);
  3222. goto out_commit;
  3223. }
  3224. inode->i_ctime = CURRENT_TIME;
  3225. di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
  3226. di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
  3227. ocfs2_journal_dirty(handle, di_bh);
  3228. out_commit:
  3229. ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
  3230. out:
  3231. return ret;
  3232. }
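/*
 * Make sure the inode has a refcount tree and mark everything it owns as
 * refcounted: create the tree if OCFS2_HAS_REFCOUNT_FL is not yet set,
 * then walk the data extents (skipped for inline data) and the xattrs,
 * adding the refcount flag and the matching refcount records.  The ctime
 * is updated if any data extent actually changed.
 */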
  3233. static int ocfs2_attach_refcount_tree(struct inode *inode,
  3234. struct buffer_head *di_bh)
  3235. {
  3236. int ret, data_changed = 0;
  3237. struct buffer_head *ref_root_bh = NULL;
  3238. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  3239. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  3240. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  3241. struct ocfs2_refcount_tree *ref_tree;
  3242. unsigned int ext_flags;
  3243. loff_t size;
  3244. u32 cpos, num_clusters, clusters, p_cluster;
  3245. struct ocfs2_cached_dealloc_ctxt dealloc;
  3246. struct ocfs2_extent_tree di_et;
  3247. ocfs2_init_dealloc_ctxt(&dealloc);
  3248. if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) {
  3249. ret = ocfs2_create_refcount_tree(inode, di_bh);
  3250. if (ret) {
  3251. mlog_errno(ret);
  3252. goto out;
  3253. }
  3254. }
  3255. BUG_ON(!di->i_refcount_loc);
  3256. ret = ocfs2_lock_refcount_tree(osb,
  3257. le64_to_cpu(di->i_refcount_loc), 1,
  3258. &ref_tree, &ref_root_bh);
  3259. if (ret) {
  3260. mlog_errno(ret);
  3261. goto out;
  3262. }
  3263. if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
  3264. goto attach_xattr;
  3265. ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
  3266. size = i_size_read(inode);
  3267. clusters = ocfs2_clusters_for_bytes(inode->i_sb, size);
  3268. cpos = 0;
  3269. while (cpos < clusters) {
  3270. ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
  3271. &num_clusters, &ext_flags);
  3272. if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) {
  3273. ret = ocfs2_add_refcount_flag(inode, &di_et,
  3274. &ref_tree->rf_ci,
  3275. ref_root_bh, cpos,
  3276. p_cluster, num_clusters,
  3277. &dealloc, NULL);
  3278. if (ret) {
  3279. mlog_errno(ret);
  3280. goto unlock;
  3281. }
  3282. data_changed = 1;
  3283. }
  3284. cpos += num_clusters;
  3285. }
  3286. attach_xattr:
  3287. if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
  3288. ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
  3289. &ref_tree->rf_ci,
  3290. ref_root_bh,
  3291. &dealloc);
  3292. if (ret) {
  3293. mlog_errno(ret);
  3294. goto unlock;
  3295. }
  3296. }
  3297. if (data_changed) {
  3298. ret = ocfs2_change_ctime(inode, di_bh);
  3299. if (ret)
  3300. mlog_errno(ret);
  3301. }
  3302. unlock:
  3303. ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
  3304. brelse(ref_root_bh);
  3305. if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) {
  3306. ocfs2_schedule_truncate_log_flush(osb, 1);
  3307. ocfs2_run_deallocs(osb, &dealloc);
  3308. }
  3309. out:
  3310. /*
  3311. * Empty the extent map so that we may get the right extent
  3312. * record from the disk.
  3313. */
  3314. ocfs2_extent_map_trunc(inode, 0);
  3315. return ret;
  3316. }
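/*
 * Insert an already-refcounted extent into the target inode's extent
 * tree and bump the shared clusters' refcount, all within one
 * transaction.  Used while duplicating the source extent list for
 * reflink.
 */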
  3317. static int ocfs2_add_refcounted_extent(struct inode *inode,
  3318. struct ocfs2_extent_tree *et,
  3319. struct ocfs2_caching_info *ref_ci,
  3320. struct buffer_head *ref_root_bh,
  3321. u32 cpos, u32 p_cluster, u32 num_clusters,
  3322. unsigned int ext_flags,
  3323. struct ocfs2_cached_dealloc_ctxt *dealloc)
  3324. {
  3325. int ret;
  3326. handle_t *handle;
  3327. int credits = 0;
  3328. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  3329. struct ocfs2_alloc_context *meta_ac = NULL;
  3330. ret = ocfs2_lock_refcount_allocators(inode->i_sb,
  3331. p_cluster, num_clusters,
  3332. et, ref_ci,
  3333. ref_root_bh, &meta_ac,
  3334. NULL, &credits);
  3335. if (ret) {
  3336. mlog_errno(ret);
  3337. goto out;
  3338. }
  3339. handle = ocfs2_start_trans(osb, credits);
  3340. if (IS_ERR(handle)) {
  3341. ret = PTR_ERR(handle);
  3342. mlog_errno(ret);
  3343. goto out;
  3344. }
  3345. ret = ocfs2_insert_extent(handle, et, cpos,
  3346. ocfs2_clusters_to_blocks(inode->i_sb, p_cluster),
  3347. num_clusters, ext_flags, meta_ac);
  3348. if (ret) {
  3349. mlog_errno(ret);
  3350. goto out_commit;
  3351. }
  3352. ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
  3353. p_cluster, num_clusters,
  3354. meta_ac, dealloc);
  3355. if (ret)
  3356. mlog_errno(ret);
  3357. out_commit:
  3358. ocfs2_commit_trans(osb, handle);
  3359. out:
  3360. if (meta_ac)
  3361. ocfs2_free_alloc_context(meta_ac);
  3362. return ret;
  3363. }
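/*
 * For inline-data inodes there are no clusters to share, so reflink
 * simply copies the inline data area from the source dinode to the
 * target and sets OCFS2_INLINE_DATA_FL on the target.
 */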
  3364. static int ocfs2_duplicate_inline_data(struct inode *s_inode,
  3365. struct buffer_head *s_bh,
  3366. struct inode *t_inode,
  3367. struct buffer_head *t_bh)
  3368. {
  3369. int ret;
  3370. handle_t *handle;
  3371. struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
  3372. struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
  3373. struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data;
  3374. BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
  3375. handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
  3376. if (IS_ERR(handle)) {
  3377. ret = PTR_ERR(handle);
  3378. mlog_errno(ret);
  3379. goto out;
  3380. }
  3381. ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
  3382. OCFS2_JOURNAL_ACCESS_WRITE);
  3383. if (ret) {
  3384. mlog_errno(ret);
  3385. goto out_commit;
  3386. }
  3387. t_di->id2.i_data.id_count = s_di->id2.i_data.id_count;
  3388. memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data,
  3389. le16_to_cpu(s_di->id2.i_data.id_count));
  3390. spin_lock(&OCFS2_I(t_inode)->ip_lock);
  3391. OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
  3392. t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features);
  3393. spin_unlock(&OCFS2_I(t_inode)->ip_lock);
  3394. ocfs2_journal_dirty(handle, t_bh);
  3395. out_commit:
  3396. ocfs2_commit_trans(osb, handle);
  3397. out:
  3398. return ret;
  3399. }
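/*
 * Walk every allocated extent of the source inode and add it to the
 * target inode as a refcounted extent, so both inodes end up pointing
 * at the same physical clusters.
 */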
  3400. static int ocfs2_duplicate_extent_list(struct inode *s_inode,
  3401. struct inode *t_inode,
  3402. struct buffer_head *t_bh,
  3403. struct ocfs2_caching_info *ref_ci,
  3404. struct buffer_head *ref_root_bh,
  3405. struct ocfs2_cached_dealloc_ctxt *dealloc)
  3406. {
  3407. int ret = 0;
  3408. u32 p_cluster, num_clusters, clusters, cpos;
  3409. loff_t size;
  3410. unsigned int ext_flags;
  3411. struct ocfs2_extent_tree et;
  3412. ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh);
  3413. size = i_size_read(s_inode);
  3414. clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size);
  3415. cpos = 0;
  3416. while (cpos < clusters) {
  3417. ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
  3418. &num_clusters, &ext_flags);
  3419. if (p_cluster) {
  3420. ret = ocfs2_add_refcounted_extent(t_inode, &et,
  3421. ref_ci, ref_root_bh,
  3422. cpos, p_cluster,
  3423. num_clusters,
  3424. ext_flags,
  3425. dealloc);
  3426. if (ret) {
  3427. mlog_errno(ret);
  3428. goto out;
  3429. }
  3430. }
  3431. cpos += num_clusters;
  3432. }
  3433. out:
  3434. return ret;
  3435. }
  3436. /*
3437. * Change the new file's attributes to match the source.
3438. *
3439. * reflink creates a snapshot of a file; the attributes must be
3440. * identical except for three exceptions: nlink, ino, and ctime.
  3441. */
  3442. static int ocfs2_complete_reflink(struct inode *s_inode,
  3443. struct buffer_head *s_bh,
  3444. struct inode *t_inode,
  3445. struct buffer_head *t_bh,
  3446. bool preserve)
  3447. {
  3448. int ret;
  3449. handle_t *handle;
  3450. struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
  3451. struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data;
  3452. loff_t size = i_size_read(s_inode);
  3453. handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb),
  3454. OCFS2_INODE_UPDATE_CREDITS);
  3455. if (IS_ERR(handle)) {
  3456. ret = PTR_ERR(handle);
  3457. mlog_errno(ret);
  3458. return ret;
  3459. }
  3460. ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
  3461. OCFS2_JOURNAL_ACCESS_WRITE);
  3462. if (ret) {
  3463. mlog_errno(ret);
  3464. goto out_commit;
  3465. }
  3466. spin_lock(&OCFS2_I(t_inode)->ip_lock);
  3467. OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters;
  3468. OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr;
  3469. OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features;
  3470. spin_unlock(&OCFS2_I(t_inode)->ip_lock);
  3471. i_size_write(t_inode, size);
  3472. t_inode->i_blocks = s_inode->i_blocks;
  3473. di->i_xattr_inline_size = s_di->i_xattr_inline_size;
  3474. di->i_clusters = s_di->i_clusters;
  3475. di->i_size = s_di->i_size;
  3476. di->i_dyn_features = s_di->i_dyn_features;
  3477. di->i_attr = s_di->i_attr;
  3478. if (preserve) {
  3479. di->i_uid = s_di->i_uid;
  3480. di->i_gid = s_di->i_gid;
  3481. di->i_mode = s_di->i_mode;
  3482. /*
3483. * Update the times:
3484. * we want mtime to appear identical to the source while
3485. * ctime is updated.
  3486. */
  3487. t_inode->i_ctime = CURRENT_TIME;
  3488. di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec);
  3489. di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec);
  3490. t_inode->i_mtime = s_inode->i_mtime;
  3491. di->i_mtime = s_di->i_mtime;
  3492. di->i_mtime_nsec = s_di->i_mtime_nsec;
  3493. }
  3494. ocfs2_journal_dirty(handle, t_bh);
  3495. out_commit:
  3496. ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle);
  3497. return ret;
  3498. }
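/*
 * Build the actual snapshot: point the target inode at the source's
 * refcount tree, then either copy the inline data or duplicate the
 * whole extent list as refcounted extents.
 */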
  3499. static int ocfs2_create_reflink_node(struct inode *s_inode,
  3500. struct buffer_head *s_bh,
  3501. struct inode *t_inode,
  3502. struct buffer_head *t_bh,
  3503. bool preserve)
  3504. {
  3505. int ret;
  3506. struct buffer_head *ref_root_bh = NULL;
  3507. struct ocfs2_cached_dealloc_ctxt dealloc;
  3508. struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
  3509. struct ocfs2_refcount_block *rb;
  3510. struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
  3511. struct ocfs2_refcount_tree *ref_tree;
  3512. ocfs2_init_dealloc_ctxt(&dealloc);
  3513. ret = ocfs2_set_refcount_tree(t_inode, t_bh,
  3514. le64_to_cpu(di->i_refcount_loc));
  3515. if (ret) {
  3516. mlog_errno(ret);
  3517. goto out;
  3518. }
  3519. if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
  3520. ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
  3521. t_inode, t_bh);
  3522. if (ret)
  3523. mlog_errno(ret);
  3524. goto out;
  3525. }
  3526. ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
  3527. 1, &ref_tree, &ref_root_bh);
  3528. if (ret) {
  3529. mlog_errno(ret);
  3530. goto out;
  3531. }
  3532. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  3533. ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
  3534. &ref_tree->rf_ci, ref_root_bh,
  3535. &dealloc);
  3536. if (ret) {
  3537. mlog_errno(ret);
  3538. goto out_unlock_refcount;
  3539. }
  3540. out_unlock_refcount:
  3541. ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
  3542. brelse(ref_root_bh);
  3543. out:
  3544. if (ocfs2_dealloc_has_cluster(&dealloc)) {
  3545. ocfs2_schedule_truncate_log_flush(osb, 1);
  3546. ocfs2_run_deallocs(osb, &dealloc);
  3547. }
  3548. return ret;
  3549. }
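/*
 * Reflink the data of old_dentry's inode into new_inode: flush dirty
 * pages, attach a refcount tree to the source, and, with the target
 * locked, create the reflink node, copy xattrs if present, and finally
 * copy the inode attributes.
 */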
  3550. static int __ocfs2_reflink(struct dentry *old_dentry,
  3551. struct buffer_head *old_bh,
  3552. struct inode *new_inode,
  3553. bool preserve)
  3554. {
  3555. int ret;
  3556. struct inode *inode = old_dentry->d_inode;
  3557. struct buffer_head *new_bh = NULL;
  3558. ret = filemap_fdatawrite(inode->i_mapping);
  3559. if (ret) {
  3560. mlog_errno(ret);
  3561. goto out;
  3562. }
  3563. ret = ocfs2_attach_refcount_tree(inode, old_bh);
  3564. if (ret) {
  3565. mlog_errno(ret);
  3566. goto out;
  3567. }
  3568. mutex_lock(&new_inode->i_mutex);
  3569. ret = ocfs2_inode_lock(new_inode, &new_bh, 1);
  3570. if (ret) {
  3571. mlog_errno(ret);
  3572. goto out_unlock;
  3573. }
  3574. ret = ocfs2_create_reflink_node(inode, old_bh,
  3575. new_inode, new_bh, preserve);
  3576. if (ret) {
  3577. mlog_errno(ret);
  3578. goto inode_unlock;
  3579. }
  3580. if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
  3581. ret = ocfs2_reflink_xattrs(inode, old_bh,
  3582. new_inode, new_bh,
  3583. preserve);
  3584. if (ret) {
  3585. mlog_errno(ret);
  3586. goto inode_unlock;
  3587. }
  3588. }
  3589. ret = ocfs2_complete_reflink(inode, old_bh,
  3590. new_inode, new_bh, preserve);
  3591. if (ret)
  3592. mlog_errno(ret);
  3593. inode_unlock:
  3594. ocfs2_inode_unlock(new_inode, 1);
  3595. brelse(new_bh);
  3596. out_unlock:
  3597. mutex_unlock(&new_inode->i_mutex);
  3598. out:
  3599. if (!ret) {
  3600. ret = filemap_fdatawait(inode->i_mapping);
  3601. if (ret)
  3602. mlog_errno(ret);
  3603. }
  3604. return ret;
  3605. }
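/*
 * Top-level reflink: create the target inode in the orphan directory,
 * snapshot the source into it under the source's cluster locks, restore
 * security/ACLs when not preserving attributes, and finally move the
 * new inode out of the orphan directory to new_dentry.
 */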
  3606. static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
  3607. struct dentry *new_dentry, bool preserve)
  3608. {
  3609. int error;
  3610. struct inode *inode = old_dentry->d_inode;
  3611. struct buffer_head *old_bh = NULL;
  3612. struct inode *new_orphan_inode = NULL;
  3613. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
  3614. return -EOPNOTSUPP;
  3615. error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
  3616. &new_orphan_inode);
  3617. if (error) {
  3618. mlog_errno(error);
  3619. goto out;
  3620. }
  3621. error = ocfs2_inode_lock(inode, &old_bh, 1);
  3622. if (error) {
  3623. mlog_errno(error);
  3624. goto out;
  3625. }
  3626. down_write(&OCFS2_I(inode)->ip_xattr_sem);
  3627. down_write(&OCFS2_I(inode)->ip_alloc_sem);
  3628. error = __ocfs2_reflink(old_dentry, old_bh,
  3629. new_orphan_inode, preserve);
  3630. up_write(&OCFS2_I(inode)->ip_alloc_sem);
  3631. up_write(&OCFS2_I(inode)->ip_xattr_sem);
  3632. ocfs2_inode_unlock(inode, 1);
  3633. brelse(old_bh);
  3634. if (error) {
  3635. mlog_errno(error);
  3636. goto out;
  3637. }
3638. /* If security attributes aren't preserved, re-initialize the security and ACL. */
  3639. if (!preserve) {
  3640. error = ocfs2_init_security_and_acl(dir, new_orphan_inode);
  3641. if (error)
  3642. mlog_errno(error);
  3643. }
  3644. out:
  3645. if (!error) {
  3646. error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
  3647. new_dentry);
  3648. if (error)
  3649. mlog_errno(error);
  3650. }
  3651. if (new_orphan_inode) {
  3652. /*
  3653. * We need to open_unlock the inode no matter whether we
  3654. * succeed or not, so that other nodes can delete it later.
  3655. */
  3656. ocfs2_open_unlock(new_orphan_inode);
  3657. if (error)
  3658. iput(new_orphan_inode);
  3659. }
  3660. return error;
  3661. }
  3662. /*
  3663. * Below here are the bits used by OCFS2_IOC_REFLINK() to fake
  3664. * sys_reflink(). This will go away when vfs_reflink() exists in
  3665. * fs/namei.c.
  3666. */
  3667. /* copied from may_create in VFS. */
  3668. static inline int ocfs2_may_create(struct inode *dir, struct dentry *child)
  3669. {
  3670. if (child->d_inode)
  3671. return -EEXIST;
  3672. if (IS_DEADDIR(dir))
  3673. return -ENOENT;
  3674. return inode_permission(dir, MAY_WRITE | MAY_EXEC);
  3675. }
  3676. /* copied from user_path_parent. */
  3677. static int ocfs2_user_path_parent(const char __user *path,
  3678. struct nameidata *nd, char **name)
  3679. {
  3680. char *s = getname(path);
  3681. int error;
  3682. if (IS_ERR(s))
  3683. return PTR_ERR(s);
  3684. error = path_lookup(s, LOOKUP_PARENT, nd);
  3685. if (error)
  3686. putname(s);
  3687. else
  3688. *name = s;
  3689. return error;
  3690. }
  3691. /**
  3692. * ocfs2_vfs_reflink - Create a reference-counted link
  3693. *
  3694. * @old_dentry: source dentry + inode
  3695. * @dir: directory to create the target
  3696. * @new_dentry: target dentry
  3697. * @preserve: if true, preserve all file attributes
  3698. */
  3699. static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
  3700. struct dentry *new_dentry, bool preserve)
  3701. {
  3702. struct inode *inode = old_dentry->d_inode;
  3703. int error;
  3704. if (!inode)
  3705. return -ENOENT;
  3706. error = ocfs2_may_create(dir, new_dentry);
  3707. if (error)
  3708. return error;
  3709. if (dir->i_sb != inode->i_sb)
  3710. return -EXDEV;
  3711. /*
  3712. * A reflink to an append-only or immutable file cannot be created.
  3713. */
  3714. if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
  3715. return -EPERM;
  3716. /* Only regular files can be reflinked. */
  3717. if (!S_ISREG(inode->i_mode))
  3718. return -EPERM;
  3719. /*
3720. * If the caller wants to preserve ownership, it must have the
3721. * rights to do so.
  3722. */
  3723. if (preserve) {
  3724. if ((current_fsuid() != inode->i_uid) && !capable(CAP_CHOWN))
  3725. return -EPERM;
  3726. if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN))
  3727. return -EPERM;
  3728. }
  3729. /*
  3730. * If the caller is modifying any aspect of the attributes, they
  3731. * are not creating a snapshot. They need read permission on the
  3732. * file.
  3733. */
  3734. if (!preserve) {
  3735. error = inode_permission(inode, MAY_READ);
  3736. if (error)
  3737. return error;
  3738. }
  3739. mutex_lock(&inode->i_mutex);
  3740. dquot_initialize(dir);
  3741. error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
  3742. mutex_unlock(&inode->i_mutex);
  3743. if (!error)
  3744. fsnotify_create(dir, new_dentry);
  3745. return error;
  3746. }
  3747. /*
3748. * Most of this code is copied from sys_linkat.
  3749. */
  3750. int ocfs2_reflink_ioctl(struct inode *inode,
  3751. const char __user *oldname,
  3752. const char __user *newname,
  3753. bool preserve)
  3754. {
  3755. struct dentry *new_dentry;
  3756. struct nameidata nd;
  3757. struct path old_path;
  3758. int error;
  3759. char *to = NULL;
  3760. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
  3761. return -EOPNOTSUPP;
  3762. error = user_path_at(AT_FDCWD, oldname, 0, &old_path);
  3763. if (error) {
  3764. mlog_errno(error);
  3765. return error;
  3766. }
  3767. error = ocfs2_user_path_parent(newname, &nd, &to);
  3768. if (error) {
  3769. mlog_errno(error);
  3770. goto out;
  3771. }
  3772. error = -EXDEV;
  3773. if (old_path.mnt != nd.path.mnt)
  3774. goto out_release;
  3775. new_dentry = lookup_create(&nd, 0);
  3776. error = PTR_ERR(new_dentry);
  3777. if (IS_ERR(new_dentry)) {
  3778. mlog_errno(error);
  3779. goto out_unlock;
  3780. }
  3781. error = mnt_want_write(nd.path.mnt);
  3782. if (error) {
  3783. mlog_errno(error);
  3784. goto out_dput;
  3785. }
  3786. error = ocfs2_vfs_reflink(old_path.dentry,
  3787. nd.path.dentry->d_inode,
  3788. new_dentry, preserve);
  3789. mnt_drop_write(nd.path.mnt);
  3790. out_dput:
  3791. dput(new_dentry);
  3792. out_unlock:
  3793. mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
  3794. out_release:
  3795. path_put(&nd.path);
  3796. putname(to);
  3797. out:
  3798. path_put(&old_path);
  3799. return error;
  3800. }