refcounttree.c

  1. /* -*- mode: c; c-basic-offset: 8; -*-
  2. * vim: noexpandtab sw=8 ts=8 sts=0:
  3. *
  4. * refcounttree.c
  5. *
  6. * Copyright (C) 2009 Oracle. All rights reserved.
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public
  10. * License version 2 as published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. */
  17. #include <linux/sort.h>
  18. #define MLOG_MASK_PREFIX ML_REFCOUNT
  19. #include <cluster/masklog.h>
  20. #include "ocfs2.h"
  21. #include "inode.h"
  22. #include "alloc.h"
  23. #include "suballoc.h"
  24. #include "journal.h"
  25. #include "uptodate.h"
  26. #include "super.h"
  27. #include "buffer_head_io.h"
  28. #include "blockcheck.h"
  29. #include "refcounttree.h"
  30. #include "sysfile.h"
  31. #include "dlmglue.h"
  32. #include "extent_map.h"
  33. #include "aops.h"
  34. #include "xattr.h"
  35. #include "namei.h"
  36. #include <linux/bio.h>
  37. #include <linux/blkdev.h>
  38. #include <linux/slab.h>
  39. #include <linux/writeback.h>
  40. #include <linux/pagevec.h>
  41. #include <linux/swap.h>
  42. #include <linux/security.h>
  43. #include <linux/fsnotify.h>
  44. #include <linux/quotaops.h>
  45. #include <linux/namei.h>
  46. #include <linux/mount.h>
  47. struct ocfs2_cow_context {
  48. struct inode *inode;
  49. u32 cow_start;
  50. u32 cow_len;
  51. struct ocfs2_extent_tree data_et;
  52. struct ocfs2_refcount_tree *ref_tree;
  53. struct buffer_head *ref_root_bh;
  54. struct ocfs2_alloc_context *meta_ac;
  55. struct ocfs2_alloc_context *data_ac;
  56. struct ocfs2_cached_dealloc_ctxt dealloc;
  57. void *cow_object;
  58. struct ocfs2_post_refcount *post_refcount;
  59. int extra_credits;
  60. int (*get_clusters)(struct ocfs2_cow_context *context,
  61. u32 v_cluster, u32 *p_cluster,
  62. u32 *num_clusters,
  63. unsigned int *extent_flags);
  64. int (*cow_duplicate_clusters)(handle_t *handle,
  65. struct ocfs2_cow_context *context,
  66. u32 cpos, u32 old_cluster,
  67. u32 new_cluster, u32 new_len);
  68. };
  69. static inline struct ocfs2_refcount_tree *
  70. cache_info_to_refcount(struct ocfs2_caching_info *ci)
  71. {
  72. return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
  73. }
  74. static int ocfs2_validate_refcount_block(struct super_block *sb,
  75. struct buffer_head *bh)
  76. {
  77. int rc;
  78. struct ocfs2_refcount_block *rb =
  79. (struct ocfs2_refcount_block *)bh->b_data;
  80. mlog(0, "Validating refcount block %llu\n",
  81. (unsigned long long)bh->b_blocknr);
  82. BUG_ON(!buffer_uptodate(bh));
  83. /*
  84. * If the ecc fails, we return the error but otherwise
  85. * leave the filesystem running. We know any error is
  86. * local to this block.
  87. */
  88. rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
  89. if (rc) {
  90. mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
  91. (unsigned long long)bh->b_blocknr);
  92. return rc;
  93. }
  94. if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
  95. ocfs2_error(sb,
  96. "Refcount block #%llu has bad signature %.*s",
  97. (unsigned long long)bh->b_blocknr, 7,
  98. rb->rf_signature);
  99. return -EINVAL;
  100. }
  101. if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
  102. ocfs2_error(sb,
  103. "Refcount block #%llu has an invalid rf_blkno "
  104. "of %llu",
  105. (unsigned long long)bh->b_blocknr,
  106. (unsigned long long)le64_to_cpu(rb->rf_blkno));
  107. return -EINVAL;
  108. }
  109. if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
  110. ocfs2_error(sb,
  111. "Refcount block #%llu has an invalid "
  112. "rf_fs_generation of #%u",
  113. (unsigned long long)bh->b_blocknr,
  114. le32_to_cpu(rb->rf_fs_generation));
  115. return -EINVAL;
  116. }
  117. return 0;
  118. }
  119. static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
  120. u64 rb_blkno,
  121. struct buffer_head **bh)
  122. {
  123. int rc;
  124. struct buffer_head *tmp = *bh;
  125. rc = ocfs2_read_block(ci, rb_blkno, &tmp,
  126. ocfs2_validate_refcount_block);
  127. /* If ocfs2_read_block() got us a new bh, pass it up. */
  128. if (!rc && !*bh)
  129. *bh = tmp;
  130. return rc;
  131. }
  132. static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
  133. {
  134. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  135. return rf->rf_blkno;
  136. }
  137. static struct super_block *
  138. ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
  139. {
  140. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  141. return rf->rf_sb;
  142. }
  143. static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
  144. {
  145. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  146. spin_lock(&rf->rf_lock);
  147. }
  148. static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
  149. {
  150. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  151. spin_unlock(&rf->rf_lock);
  152. }
  153. static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
  154. {
  155. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  156. mutex_lock(&rf->rf_io_mutex);
  157. }
  158. static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
  159. {
  160. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  161. mutex_unlock(&rf->rf_io_mutex);
  162. }
  163. static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
  164. .co_owner = ocfs2_refcount_cache_owner,
  165. .co_get_super = ocfs2_refcount_cache_get_super,
  166. .co_cache_lock = ocfs2_refcount_cache_lock,
  167. .co_cache_unlock = ocfs2_refcount_cache_unlock,
  168. .co_io_lock = ocfs2_refcount_cache_io_lock,
  169. .co_io_unlock = ocfs2_refcount_cache_io_unlock,
  170. };
  171. static struct ocfs2_refcount_tree *
  172. ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
  173. {
  174. struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
  175. struct ocfs2_refcount_tree *tree = NULL;
  176. while (n) {
  177. tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);
  178. if (blkno < tree->rf_blkno)
  179. n = n->rb_left;
  180. else if (blkno > tree->rf_blkno)
  181. n = n->rb_right;
  182. else
  183. return tree;
  184. }
  185. return NULL;
  186. }
  187. /* osb_lock is already locked. */
  188. static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
  189. struct ocfs2_refcount_tree *new)
  190. {
  191. u64 rf_blkno = new->rf_blkno;
  192. struct rb_node *parent = NULL;
  193. struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
  194. struct ocfs2_refcount_tree *tmp;
  195. while (*p) {
  196. parent = *p;
  197. tmp = rb_entry(parent, struct ocfs2_refcount_tree,
  198. rf_node);
  199. if (rf_blkno < tmp->rf_blkno)
  200. p = &(*p)->rb_left;
  201. else if (rf_blkno > tmp->rf_blkno)
  202. p = &(*p)->rb_right;
  203. else {
  204. /* This should never happen! */
  205. mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
  206. (unsigned long long)rf_blkno);
  207. BUG();
  208. }
  209. }
  210. rb_link_node(&new->rf_node, parent, p);
  211. rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
  212. }
  213. static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
  214. {
  215. ocfs2_metadata_cache_exit(&tree->rf_ci);
  216. ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
  217. ocfs2_lock_res_free(&tree->rf_lockres);
  218. kfree(tree);
  219. }
  220. static inline void
  221. ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
  222. struct ocfs2_refcount_tree *tree)
  223. {
  224. rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
  225. if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
  226. osb->osb_ref_tree_lru = NULL;
  227. }
  228. static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
  229. struct ocfs2_refcount_tree *tree)
  230. {
  231. spin_lock(&osb->osb_lock);
  232. ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
  233. spin_unlock(&osb->osb_lock);
  234. }
  235. static void ocfs2_kref_remove_refcount_tree(struct kref *kref)
  236. {
  237. struct ocfs2_refcount_tree *tree =
  238. container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);
  239. ocfs2_free_refcount_tree(tree);
  240. }
  241. static inline void
  242. ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
  243. {
  244. kref_get(&tree->rf_getcnt);
  245. }
  246. static inline void
  247. ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
  248. {
  249. kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
  250. }
  251. static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
  252. struct super_block *sb)
  253. {
  254. ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
  255. mutex_init(&new->rf_io_mutex);
  256. new->rf_sb = sb;
  257. spin_lock_init(&new->rf_lock);
  258. }
  259. static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
  260. struct ocfs2_refcount_tree *new,
  261. u64 rf_blkno, u32 generation)
  262. {
  263. init_rwsem(&new->rf_sem);
  264. ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
  265. rf_blkno, generation);
  266. }
  267. static struct ocfs2_refcount_tree*
  268. ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
  269. {
  270. struct ocfs2_refcount_tree *new;
  271. new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
  272. if (!new)
  273. return NULL;
  274. new->rf_blkno = rf_blkno;
  275. kref_init(&new->rf_getcnt);
  276. ocfs2_init_refcount_tree_ci(new, osb->sb);
  277. return new;
  278. }
  279. static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
  280. struct ocfs2_refcount_tree **ret_tree)
  281. {
  282. int ret = 0;
  283. struct ocfs2_refcount_tree *tree, *new = NULL;
  284. struct buffer_head *ref_root_bh = NULL;
  285. struct ocfs2_refcount_block *ref_rb;
  286. spin_lock(&osb->osb_lock);
  287. if (osb->osb_ref_tree_lru &&
  288. osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
  289. tree = osb->osb_ref_tree_lru;
  290. else
  291. tree = ocfs2_find_refcount_tree(osb, rf_blkno);
  292. if (tree)
  293. goto out;
  294. spin_unlock(&osb->osb_lock);
  295. new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
  296. if (!new) {
  297. ret = -ENOMEM;
  298. mlog_errno(ret);
  299. return ret;
  300. }
  301. /*
  302. * We need the generation to create the refcount tree lock and since
  303. * it isn't changed during the tree modification, we are safe here to
  304. * read without protection.
  305. * We also have to purge the cache after we create the lock since the
  306. * refcount block may have stale data. It can only be trusted when
  307. * we hold the refcount lock.
  308. */
  309. ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
  310. if (ret) {
  311. mlog_errno(ret);
  312. ocfs2_metadata_cache_exit(&new->rf_ci);
  313. kfree(new);
  314. return ret;
  315. }
  316. ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  317. new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
  318. ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
  319. new->rf_generation);
  320. ocfs2_metadata_cache_purge(&new->rf_ci);
  321. spin_lock(&osb->osb_lock);
  322. tree = ocfs2_find_refcount_tree(osb, rf_blkno);
  323. if (tree)
  324. goto out;
  325. ocfs2_insert_refcount_tree(osb, new);
  326. tree = new;
  327. new = NULL;
  328. out:
  329. *ret_tree = tree;
  330. osb->osb_ref_tree_lru = tree;
  331. spin_unlock(&osb->osb_lock);
  332. if (new)
  333. ocfs2_free_refcount_tree(new);
  334. brelse(ref_root_bh);
  335. return ret;
  336. }
  337. static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
  338. {
  339. int ret;
  340. struct buffer_head *di_bh = NULL;
  341. struct ocfs2_dinode *di;
  342. ret = ocfs2_read_inode_block(inode, &di_bh);
  343. if (ret) {
  344. mlog_errno(ret);
  345. goto out;
  346. }
  347. BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  348. di = (struct ocfs2_dinode *)di_bh->b_data;
  349. *ref_blkno = le64_to_cpu(di->i_refcount_loc);
  350. brelse(di_bh);
  351. out:
  352. return ret;
  353. }
  354. static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
  355. struct ocfs2_refcount_tree *tree, int rw)
  356. {
  357. int ret;
  358. ret = ocfs2_refcount_lock(tree, rw);
  359. if (ret) {
  360. mlog_errno(ret);
  361. goto out;
  362. }
  363. if (rw)
  364. down_write(&tree->rf_sem);
  365. else
  366. down_read(&tree->rf_sem);
  367. out:
  368. return ret;
  369. }
  370. /*
  371. * Lock the refcount tree pointed to by ref_blkno and return the tree.
  372. * In most cases, we lock the tree and read the refcount block.
  373. * So read it here if the caller really needs it.
  374. *
  375. * If the tree has been re-created by another node, it will free the
  376. * old one and re-create it.
  377. */
  378. int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
  379. u64 ref_blkno, int rw,
  380. struct ocfs2_refcount_tree **ret_tree,
  381. struct buffer_head **ref_bh)
  382. {
  383. int ret, delete_tree = 0;
  384. struct ocfs2_refcount_tree *tree = NULL;
  385. struct buffer_head *ref_root_bh = NULL;
  386. struct ocfs2_refcount_block *rb;
  387. again:
  388. ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
  389. if (ret) {
  390. mlog_errno(ret);
  391. return ret;
  392. }
  393. ocfs2_refcount_tree_get(tree);
  394. ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
  395. if (ret) {
  396. mlog_errno(ret);
  397. ocfs2_refcount_tree_put(tree);
  398. goto out;
  399. }
  400. ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
  401. &ref_root_bh);
  402. if (ret) {
  403. mlog_errno(ret);
  404. ocfs2_unlock_refcount_tree(osb, tree, rw);
  405. ocfs2_refcount_tree_put(tree);
  406. goto out;
  407. }
  408. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  409. /*
  410. * If the refcount block has been freed and re-created, we may need
  411. * to recreate the refcount tree also.
  412. *
  413. * Here we just remove the tree from the rb-tree, and the last
  414. * kref holder will unlock and delete this refcount_tree.
  415. * Then we goto "again" and ocfs2_get_refcount_tree will create
  416. * the new refcount tree for us.
  417. */
  418. if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
  419. if (!tree->rf_removed) {
  420. ocfs2_erase_refcount_tree_from_list(osb, tree);
  421. tree->rf_removed = 1;
  422. delete_tree = 1;
  423. }
  424. ocfs2_unlock_refcount_tree(osb, tree, rw);
  425. /*
  426. * We get an extra reference when we create the refcount
  427. * tree, so another put will destroy it.
  428. */
  429. if (delete_tree)
  430. ocfs2_refcount_tree_put(tree);
  431. brelse(ref_root_bh);
  432. ref_root_bh = NULL;
  433. goto again;
  434. }
  435. *ret_tree = tree;
  436. if (ref_bh) {
  437. *ref_bh = ref_root_bh;
  438. ref_root_bh = NULL;
  439. }
  440. out:
  441. brelse(ref_root_bh);
  442. return ret;
  443. }
  444. void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
  445. struct ocfs2_refcount_tree *tree, int rw)
  446. {
  447. if (rw)
  448. up_write(&tree->rf_sem);
  449. else
  450. up_read(&tree->rf_sem);
  451. ocfs2_refcount_unlock(tree, rw);
  452. ocfs2_refcount_tree_put(tree);
  453. }
  454. void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
  455. {
  456. struct rb_node *node;
  457. struct ocfs2_refcount_tree *tree;
  458. struct rb_root *root = &osb->osb_rf_lock_tree;
  459. while ((node = rb_last(root)) != NULL) {
  460. tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);
  461. mlog(0, "Purge tree %llu\n",
  462. (unsigned long long) tree->rf_blkno);
  463. rb_erase(&tree->rf_node, root);
  464. ocfs2_free_refcount_tree(tree);
  465. }
  466. }
  467. /*
  468. * Create a refcount tree for an inode.
  469. * We take for granted that the inode is already locked.
  470. */
  471. static int ocfs2_create_refcount_tree(struct inode *inode,
  472. struct buffer_head *di_bh)
  473. {
  474. int ret;
  475. handle_t *handle = NULL;
  476. struct ocfs2_alloc_context *meta_ac = NULL;
  477. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  478. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  479. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  480. struct buffer_head *new_bh = NULL;
  481. struct ocfs2_refcount_block *rb;
  482. struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
  483. u16 suballoc_bit_start;
  484. u32 num_got;
  485. u64 suballoc_loc, first_blkno;
  486. BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
  487. mlog(0, "create tree for inode %lu\n", inode->i_ino);
  488. ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
  489. if (ret) {
  490. mlog_errno(ret);
  491. goto out;
  492. }
  493. handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
  494. if (IS_ERR(handle)) {
  495. ret = PTR_ERR(handle);
  496. mlog_errno(ret);
  497. goto out;
  498. }
  499. ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
  500. OCFS2_JOURNAL_ACCESS_WRITE);
  501. if (ret) {
  502. mlog_errno(ret);
  503. goto out_commit;
  504. }
  505. ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
  506. &suballoc_bit_start, &num_got,
  507. &first_blkno);
  508. if (ret) {
  509. mlog_errno(ret);
  510. goto out_commit;
  511. }
  512. new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
  513. if (!new_tree) {
  514. ret = -ENOMEM;
  515. mlog_errno(ret);
  516. goto out_commit;
  517. }
  518. new_bh = sb_getblk(inode->i_sb, first_blkno);
  519. ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);
  520. ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
  521. OCFS2_JOURNAL_ACCESS_CREATE);
  522. if (ret) {
  523. mlog_errno(ret);
  524. goto out_commit;
  525. }
  526. /* Initialize ocfs2_refcount_block. */
  527. rb = (struct ocfs2_refcount_block *)new_bh->b_data;
  528. memset(rb, 0, inode->i_sb->s_blocksize);
  529. strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
  530. rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
  531. rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
  532. rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
  533. rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
  534. rb->rf_blkno = cpu_to_le64(first_blkno);
  535. rb->rf_count = cpu_to_le32(1);
  536. rb->rf_records.rl_count =
  537. cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
  538. spin_lock(&osb->osb_lock);
  539. rb->rf_generation = osb->s_next_generation++;
  540. spin_unlock(&osb->osb_lock);
  541. ocfs2_journal_dirty(handle, new_bh);
  542. spin_lock(&oi->ip_lock);
  543. oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
  544. di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
  545. di->i_refcount_loc = cpu_to_le64(first_blkno);
  546. spin_unlock(&oi->ip_lock);
  547. mlog(0, "created tree for inode %lu, refblock %llu\n",
  548. inode->i_ino, (unsigned long long)first_blkno);
  549. ocfs2_journal_dirty(handle, di_bh);
  550. /*
  551. * We have to init the tree lock here since it will use
  552. * the generation number to create it.
  553. */
  554. new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
  555. ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
  556. new_tree->rf_generation);
  557. spin_lock(&osb->osb_lock);
  558. tree = ocfs2_find_refcount_tree(osb, first_blkno);
  559. /*
  560. * We've just created a new refcount tree in this block. If
  561. * we found a refcount tree on the ocfs2_super, it must be
  562. * one we just deleted. We free the old tree before
  563. * inserting the new tree.
  564. */
  565. BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
  566. if (tree)
  567. ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
  568. ocfs2_insert_refcount_tree(osb, new_tree);
  569. spin_unlock(&osb->osb_lock);
  570. new_tree = NULL;
  571. if (tree)
  572. ocfs2_refcount_tree_put(tree);
  573. out_commit:
  574. ocfs2_commit_trans(osb, handle);
  575. out:
  576. if (new_tree) {
  577. ocfs2_metadata_cache_exit(&new_tree->rf_ci);
  578. kfree(new_tree);
  579. }
  580. brelse(new_bh);
  581. if (meta_ac)
  582. ocfs2_free_alloc_context(meta_ac);
  583. return ret;
  584. }
  585. static int ocfs2_set_refcount_tree(struct inode *inode,
  586. struct buffer_head *di_bh,
  587. u64 refcount_loc)
  588. {
  589. int ret;
  590. handle_t *handle = NULL;
  591. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  592. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  593. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  594. struct buffer_head *ref_root_bh = NULL;
  595. struct ocfs2_refcount_block *rb;
  596. struct ocfs2_refcount_tree *ref_tree;
  597. BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
  598. ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
  599. &ref_tree, &ref_root_bh);
  600. if (ret) {
  601. mlog_errno(ret);
  602. return ret;
  603. }
  604. handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
  605. if (IS_ERR(handle)) {
  606. ret = PTR_ERR(handle);
  607. mlog_errno(ret);
  608. goto out;
  609. }
  610. ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
  611. OCFS2_JOURNAL_ACCESS_WRITE);
  612. if (ret) {
  613. mlog_errno(ret);
  614. goto out_commit;
  615. }
  616. ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
  617. OCFS2_JOURNAL_ACCESS_WRITE);
  618. if (ret) {
  619. mlog_errno(ret);
  620. goto out_commit;
  621. }
  622. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  623. le32_add_cpu(&rb->rf_count, 1);
  624. ocfs2_journal_dirty(handle, ref_root_bh);
  625. spin_lock(&oi->ip_lock);
  626. oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
  627. di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
  628. di->i_refcount_loc = cpu_to_le64(refcount_loc);
  629. spin_unlock(&oi->ip_lock);
  630. ocfs2_journal_dirty(handle, di_bh);
  631. out_commit:
  632. ocfs2_commit_trans(osb, handle);
  633. out:
  634. ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
  635. brelse(ref_root_bh);
  636. return ret;
  637. }
  638. int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
  639. {
  640. int ret, delete_tree = 0;
  641. handle_t *handle = NULL;
  642. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  643. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  644. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  645. struct ocfs2_refcount_block *rb;
  646. struct inode *alloc_inode = NULL;
  647. struct buffer_head *alloc_bh = NULL;
  648. struct buffer_head *blk_bh = NULL;
  649. struct ocfs2_refcount_tree *ref_tree;
  650. int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
  651. u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
  652. u16 bit = 0;
  653. if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL))
  654. return 0;
  655. BUG_ON(!ref_blkno);
  656. ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
  657. if (ret) {
  658. mlog_errno(ret);
  659. return ret;
  660. }
  661. rb = (struct ocfs2_refcount_block *)blk_bh->b_data;
  662. /*
  663. * If we are the last user, we need to free the block.
  664. * So lock the allocator ahead.
  665. */
  666. if (le32_to_cpu(rb->rf_count) == 1) {
  667. blk = le64_to_cpu(rb->rf_blkno);
  668. bit = le16_to_cpu(rb->rf_suballoc_bit);
  669. if (rb->rf_suballoc_loc)
  670. bg_blkno = le64_to_cpu(rb->rf_suballoc_loc);
  671. else
  672. bg_blkno = ocfs2_which_suballoc_group(blk, bit);
  673. alloc_inode = ocfs2_get_system_file_inode(osb,
  674. EXTENT_ALLOC_SYSTEM_INODE,
  675. le16_to_cpu(rb->rf_suballoc_slot));
  676. if (!alloc_inode) {
  677. ret = -ENOMEM;
  678. mlog_errno(ret);
  679. goto out;
  680. }
  681. mutex_lock(&alloc_inode->i_mutex);
  682. ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
  683. if (ret) {
  684. mlog_errno(ret);
  685. goto out_mutex;
  686. }
  687. credits += OCFS2_SUBALLOC_FREE;
  688. }
  689. handle = ocfs2_start_trans(osb, credits);
  690. if (IS_ERR(handle)) {
  691. ret = PTR_ERR(handle);
  692. mlog_errno(ret);
  693. goto out_unlock;
  694. }
  695. ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
  696. OCFS2_JOURNAL_ACCESS_WRITE);
  697. if (ret) {
  698. mlog_errno(ret);
  699. goto out_commit;
  700. }
  701. ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
  702. OCFS2_JOURNAL_ACCESS_WRITE);
  703. if (ret) {
  704. mlog_errno(ret);
  705. goto out_commit;
  706. }
  707. spin_lock(&oi->ip_lock);
  708. oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
  709. di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
  710. di->i_refcount_loc = 0;
  711. spin_unlock(&oi->ip_lock);
  712. ocfs2_journal_dirty(handle, di_bh);
  713. le32_add_cpu(&rb->rf_count, -1);
  714. ocfs2_journal_dirty(handle, blk_bh);
  715. if (!rb->rf_count) {
  716. delete_tree = 1;
  717. ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
  718. ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
  719. alloc_bh, bit, bg_blkno, 1);
  720. if (ret)
  721. mlog_errno(ret);
  722. }
  723. out_commit:
  724. ocfs2_commit_trans(osb, handle);
  725. out_unlock:
  726. if (alloc_inode) {
  727. ocfs2_inode_unlock(alloc_inode, 1);
  728. brelse(alloc_bh);
  729. }
  730. out_mutex:
  731. if (alloc_inode) {
  732. mutex_unlock(&alloc_inode->i_mutex);
  733. iput(alloc_inode);
  734. }
  735. out:
  736. ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
  737. if (delete_tree)
  738. ocfs2_refcount_tree_put(ref_tree);
  739. brelse(blk_bh);
  740. return ret;
  741. }
  742. static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
  743. struct buffer_head *ref_leaf_bh,
  744. u64 cpos, unsigned int len,
  745. struct ocfs2_refcount_rec *ret_rec,
  746. int *index)
  747. {
  748. int i = 0;
  749. struct ocfs2_refcount_block *rb =
  750. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  751. struct ocfs2_refcount_rec *rec = NULL;
  752. for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
  753. rec = &rb->rf_records.rl_recs[i];
  754. if (le64_to_cpu(rec->r_cpos) +
  755. le32_to_cpu(rec->r_clusters) <= cpos)
  756. continue;
  757. else if (le64_to_cpu(rec->r_cpos) > cpos)
  758. break;
  759. /* ok, cpos falls in this rec. Just return. */
  760. if (ret_rec)
  761. *ret_rec = *rec;
  762. goto out;
  763. }
  764. if (ret_rec) {
  765. /* We meet with a hole here, so fake the rec. */
  766. ret_rec->r_cpos = cpu_to_le64(cpos);
  767. ret_rec->r_refcount = 0;
  768. if (i < le16_to_cpu(rb->rf_records.rl_used) &&
  769. le64_to_cpu(rec->r_cpos) < cpos + len)
  770. ret_rec->r_clusters =
  771. cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
  772. else
  773. ret_rec->r_clusters = cpu_to_le32(len);
  774. }
  775. out:
  776. *index = i;
  777. }
  778. /*
  779. * Try to remove the refcount tree. The mechanism is:
  780. * 1) Check whether i_clusters == 0; if not, exit.
  781. * 2) Check whether we have i_xattr_loc in the dinode; if yes, exit.
  782. * 3) Check whether we have inline xattr values stored outside; if yes, exit.
  783. * 4) Remove the tree.
  784. */
  785. int ocfs2_try_remove_refcount_tree(struct inode *inode,
  786. struct buffer_head *di_bh)
  787. {
  788. int ret;
  789. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  790. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  791. down_write(&oi->ip_xattr_sem);
  792. down_write(&oi->ip_alloc_sem);
  793. if (oi->ip_clusters)
  794. goto out;
  795. if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc)
  796. goto out;
  797. if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL &&
  798. ocfs2_has_inline_xattr_value_outside(inode, di))
  799. goto out;
  800. ret = ocfs2_remove_refcount_tree(inode, di_bh);
  801. if (ret)
  802. mlog_errno(ret);
  803. out:
  804. up_write(&oi->ip_alloc_sem);
  805. up_write(&oi->ip_xattr_sem);
  806. return 0;
  807. }
  808. /*
  809. * Find the end range for a leaf refcount block indicated by
  810. * el->l_recs[index].e_blkno.
  811. */
  812. static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
  813. struct buffer_head *ref_root_bh,
  814. struct ocfs2_extent_block *eb,
  815. struct ocfs2_extent_list *el,
  816. int index, u32 *cpos_end)
  817. {
  818. int ret, i, subtree_root;
  819. u32 cpos;
  820. u64 blkno;
  821. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  822. struct ocfs2_path *left_path = NULL, *right_path = NULL;
  823. struct ocfs2_extent_tree et;
  824. struct ocfs2_extent_list *tmp_el;
  825. if (index < le16_to_cpu(el->l_next_free_rec) - 1) {
  826. /*
  827. * We have an extent rec after index, so just use the e_cpos
  828. * of the next extent rec.
  829. */
  830. *cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos);
  831. return 0;
  832. }
  833. if (!eb || (eb && !eb->h_next_leaf_blk)) {
  834. /*
  835. * We are the last extent rec, so any high cpos should
  836. * be stored in this leaf refcount block.
  837. */
  838. *cpos_end = UINT_MAX;
  839. return 0;
  840. }
  841. /*
  842. * If the extent block isn't the last one, we have to find
  843. * the subtree root between this extent block and the next
  844. * leaf extent block and get the corresponding e_cpos from
  845. * the subroot. Otherwise we may corrupt the b-tree.
  846. */
  847. ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
  848. left_path = ocfs2_new_path_from_et(&et);
  849. if (!left_path) {
  850. ret = -ENOMEM;
  851. mlog_errno(ret);
  852. goto out;
  853. }
  854. cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos);
  855. ret = ocfs2_find_path(ci, left_path, cpos);
  856. if (ret) {
  857. mlog_errno(ret);
  858. goto out;
  859. }
  860. right_path = ocfs2_new_path_from_path(left_path);
  861. if (!right_path) {
  862. ret = -ENOMEM;
  863. mlog_errno(ret);
  864. goto out;
  865. }
  866. ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos);
  867. if (ret) {
  868. mlog_errno(ret);
  869. goto out;
  870. }
  871. ret = ocfs2_find_path(ci, right_path, cpos);
  872. if (ret) {
  873. mlog_errno(ret);
  874. goto out;
  875. }
  876. subtree_root = ocfs2_find_subtree_root(&et, left_path,
  877. right_path);
  878. tmp_el = left_path->p_node[subtree_root].el;
  879. blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
  880. for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
  881. if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
  882. *cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
  883. break;
  884. }
  885. }
  886. BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));
  887. out:
  888. ocfs2_free_path(left_path);
  889. ocfs2_free_path(right_path);
  890. return ret;
  891. }
  892. /*
  893. * Given a cpos and len, try to find the refcount record which contains cpos.
  894. * 1. If cpos can be found in one refcount record, return the record.
  895. * 2. If cpos can't be found, return a fake record which starts from cpos
  896. * and ends at a small value between cpos+len and the start of the next record.
  897. * This fake record has r_refcount = 0.
  898. */
  899. static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
  900. struct buffer_head *ref_root_bh,
  901. u64 cpos, unsigned int len,
  902. struct ocfs2_refcount_rec *ret_rec,
  903. int *index,
  904. struct buffer_head **ret_bh)
  905. {
  906. int ret = 0, i, found;
  907. u32 low_cpos, uninitialized_var(cpos_end);
  908. struct ocfs2_extent_list *el;
  909. struct ocfs2_extent_rec *rec = NULL;
  910. struct ocfs2_extent_block *eb = NULL;
  911. struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
  912. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  913. struct ocfs2_refcount_block *rb =
  914. (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  915. if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
  916. ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
  917. ret_rec, index);
  918. *ret_bh = ref_root_bh;
  919. get_bh(ref_root_bh);
  920. return 0;
  921. }
  922. el = &rb->rf_list;
  923. low_cpos = cpos & OCFS2_32BIT_POS_MASK;
  924. if (el->l_tree_depth) {
  925. ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
  926. if (ret) {
  927. mlog_errno(ret);
  928. goto out;
  929. }
  930. eb = (struct ocfs2_extent_block *) eb_bh->b_data;
  931. el = &eb->h_list;
  932. if (el->l_tree_depth) {
  933. ocfs2_error(sb,
  934. "refcount tree %llu has non zero tree "
  935. "depth in leaf btree tree block %llu\n",
  936. (unsigned long long)ocfs2_metadata_cache_owner(ci),
  937. (unsigned long long)eb_bh->b_blocknr);
  938. ret = -EROFS;
  939. goto out;
  940. }
  941. }
  942. found = 0;
  943. for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
  944. rec = &el->l_recs[i];
  945. if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
  946. found = 1;
  947. break;
  948. }
  949. }
  950. if (found) {
  951. ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh,
  952. eb, el, i, &cpos_end);
  953. if (ret) {
  954. mlog_errno(ret);
  955. goto out;
  956. }
  957. if (cpos_end < low_cpos + len)
  958. len = cpos_end - low_cpos;
  959. }
  960. ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
  961. &ref_leaf_bh);
  962. if (ret) {
  963. mlog_errno(ret);
  964. goto out;
  965. }
  966. ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
  967. ret_rec, index);
  968. *ret_bh = ref_leaf_bh;
  969. out:
  970. brelse(eb_bh);
  971. return ret;
  972. }
  973. enum ocfs2_ref_rec_contig {
  974. REF_CONTIG_NONE = 0,
  975. REF_CONTIG_LEFT,
  976. REF_CONTIG_RIGHT,
  977. REF_CONTIG_LEFTRIGHT,
  978. };
  979. static enum ocfs2_ref_rec_contig
  980. ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
  981. int index)
  982. {
  983. if ((rb->rf_records.rl_recs[index].r_refcount ==
  984. rb->rf_records.rl_recs[index + 1].r_refcount) &&
  985. (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
  986. le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
  987. le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
  988. return REF_CONTIG_RIGHT;
  989. return REF_CONTIG_NONE;
  990. }
  991. static enum ocfs2_ref_rec_contig
  992. ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
  993. int index)
  994. {
  995. enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;
  996. if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
  997. ret = ocfs2_refcount_rec_adjacent(rb, index);
  998. if (index > 0) {
  999. enum ocfs2_ref_rec_contig tmp;
  1000. tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);
  1001. if (tmp == REF_CONTIG_RIGHT) {
  1002. if (ret == REF_CONTIG_RIGHT)
  1003. ret = REF_CONTIG_LEFTRIGHT;
  1004. else
  1005. ret = REF_CONTIG_LEFT;
  1006. }
  1007. }
  1008. return ret;
  1009. }
  1010. static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
  1011. int index)
  1012. {
  1013. BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
  1014. rb->rf_records.rl_recs[index+1].r_refcount);
  1015. le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
  1016. le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));
  1017. if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
  1018. memmove(&rb->rf_records.rl_recs[index + 1],
  1019. &rb->rf_records.rl_recs[index + 2],
  1020. sizeof(struct ocfs2_refcount_rec) *
  1021. (le16_to_cpu(rb->rf_records.rl_used) - index - 2));
  1022. memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
  1023. 0, sizeof(struct ocfs2_refcount_rec));
  1024. le16_add_cpu(&rb->rf_records.rl_used, -1);
  1025. }
  1026. /*
  1027. * Merge the refcount rec if we are contiguous with the adjacent recs.
  1028. */
  1029. static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
  1030. int index)
  1031. {
  1032. enum ocfs2_ref_rec_contig contig =
  1033. ocfs2_refcount_rec_contig(rb, index);
  1034. if (contig == REF_CONTIG_NONE)
  1035. return;
  1036. if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
  1037. BUG_ON(index == 0);
  1038. index--;
  1039. }
  1040. ocfs2_rotate_refcount_rec_left(rb, index);
  1041. if (contig == REF_CONTIG_LEFTRIGHT)
  1042. ocfs2_rotate_refcount_rec_left(rb, index);
  1043. }
  1044. /*
  1045. * Change the refcount indexed by "index" in ref_bh.
  1046. * If refcount reaches 0, remove it.
  1047. */
  1048. static int ocfs2_change_refcount_rec(handle_t *handle,
  1049. struct ocfs2_caching_info *ci,
  1050. struct buffer_head *ref_leaf_bh,
  1051. int index, int merge, int change)
  1052. {
  1053. int ret;
  1054. struct ocfs2_refcount_block *rb =
  1055. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1056. struct ocfs2_refcount_list *rl = &rb->rf_records;
  1057. struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];
  1058. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1059. OCFS2_JOURNAL_ACCESS_WRITE);
  1060. if (ret) {
  1061. mlog_errno(ret);
  1062. goto out;
  1063. }
  1064. mlog(0, "change index %d, old count %u, change %d\n", index,
  1065. le32_to_cpu(rec->r_refcount), change);
  1066. le32_add_cpu(&rec->r_refcount, change);
  1067. if (!rec->r_refcount) {
  1068. if (index != le16_to_cpu(rl->rl_used) - 1) {
  1069. memmove(rec, rec + 1,
  1070. (le16_to_cpu(rl->rl_used) - index - 1) *
  1071. sizeof(struct ocfs2_refcount_rec));
  1072. memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
  1073. 0, sizeof(struct ocfs2_refcount_rec));
  1074. }
  1075. le16_add_cpu(&rl->rl_used, -1);
  1076. } else if (merge)
  1077. ocfs2_refcount_rec_merge(rb, index);
  1078. ocfs2_journal_dirty(handle, ref_leaf_bh);
  1079. out:
  1080. return ret;
  1081. }
  1082. static int ocfs2_expand_inline_ref_root(handle_t *handle,
  1083. struct ocfs2_caching_info *ci,
  1084. struct buffer_head *ref_root_bh,
  1085. struct buffer_head **ref_leaf_bh,
  1086. struct ocfs2_alloc_context *meta_ac)
  1087. {
  1088. int ret;
  1089. u16 suballoc_bit_start;
  1090. u32 num_got;
  1091. u64 suballoc_loc, blkno;
  1092. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1093. struct buffer_head *new_bh = NULL;
  1094. struct ocfs2_refcount_block *new_rb;
  1095. struct ocfs2_refcount_block *root_rb =
  1096. (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1097. ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
  1098. OCFS2_JOURNAL_ACCESS_WRITE);
  1099. if (ret) {
  1100. mlog_errno(ret);
  1101. goto out;
  1102. }
  1103. ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
  1104. &suballoc_bit_start, &num_got,
  1105. &blkno);
  1106. if (ret) {
  1107. mlog_errno(ret);
  1108. goto out;
  1109. }
  1110. new_bh = sb_getblk(sb, blkno);
  1111. if (new_bh == NULL) {
  1112. ret = -EIO;
  1113. mlog_errno(ret);
  1114. goto out;
  1115. }
  1116. ocfs2_set_new_buffer_uptodate(ci, new_bh);
  1117. ret = ocfs2_journal_access_rb(handle, ci, new_bh,
  1118. OCFS2_JOURNAL_ACCESS_CREATE);
  1119. if (ret) {
  1120. mlog_errno(ret);
  1121. goto out;
  1122. }
  1123. /*
  1124. * Initialize ocfs2_refcount_block.
  1125. * It should contain the same information as the old root,
  1126. * so just memcpy it and change the corresponding fields.
  1127. */
  1128. memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);
  1129. new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
  1130. new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
  1131. new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
  1132. new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
  1133. new_rb->rf_blkno = cpu_to_le64(blkno);
  1134. new_rb->rf_cpos = cpu_to_le32(0);
  1135. new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
  1136. new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
  1137. ocfs2_journal_dirty(handle, new_bh);
  1138. /* Now change the root. */
  1139. memset(&root_rb->rf_list, 0, sb->s_blocksize -
  1140. offsetof(struct ocfs2_refcount_block, rf_list));
  1141. root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
  1142. root_rb->rf_clusters = cpu_to_le32(1);
  1143. root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
  1144. root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
  1145. root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
  1146. root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);
  1147. ocfs2_journal_dirty(handle, ref_root_bh);
  1148. mlog(0, "new leaf block %llu, used %u\n", (unsigned long long)blkno,
  1149. le16_to_cpu(new_rb->rf_records.rl_used));
  1150. *ref_leaf_bh = new_bh;
  1151. new_bh = NULL;
  1152. out:
  1153. brelse(new_bh);
  1154. return ret;
  1155. }
  1156. static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
  1157. struct ocfs2_refcount_rec *next)
  1158. {
  1159. if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
  1160. ocfs2_get_ref_rec_low_cpos(next))
  1161. return 1;
  1162. return 0;
  1163. }
  1164. static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
  1165. {
  1166. const struct ocfs2_refcount_rec *l = a, *r = b;
  1167. u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
  1168. u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);
  1169. if (l_cpos > r_cpos)
  1170. return 1;
  1171. if (l_cpos < r_cpos)
  1172. return -1;
  1173. return 0;
  1174. }
  1175. static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
  1176. {
  1177. const struct ocfs2_refcount_rec *l = a, *r = b;
  1178. u64 l_cpos = le64_to_cpu(l->r_cpos);
  1179. u64 r_cpos = le64_to_cpu(r->r_cpos);
  1180. if (l_cpos > r_cpos)
  1181. return 1;
  1182. if (l_cpos < r_cpos)
  1183. return -1;
  1184. return 0;
  1185. }
  1186. static void swap_refcount_rec(void *a, void *b, int size)
  1187. {
  1188. struct ocfs2_refcount_rec *l = a, *r = b, tmp;
  1189. tmp = *(struct ocfs2_refcount_rec *)l;
  1190. *(struct ocfs2_refcount_rec *)l =
  1191. *(struct ocfs2_refcount_rec *)r;
  1192. *(struct ocfs2_refcount_rec *)r = tmp;
  1193. }
1194. /*
1195. * The refcount records are ordered by their 64-bit cpos,
1196. * but we will use the low 32 bits as the e_cpos in the b-tree,
1197. * so we need to make sure that a split position doesn't intersect with others.
1198. *
1199. * Note: the records are already sorted by their low 32-bit cpos,
1200. * so just try the middle position first; we will exit as soon as
1201. * we find a good split position.
1202. */
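/*
 * A small illustrative example (hypothetical record layout): with
 * rl_used = 4 and records whose low-cpos ranges are [0,10), [10,20),
 * [25,30) and [30,40), middle = 2.  At delta = 0 the pair
 * recs[1]/recs[2] does not intersect (10 + 10 <= 25), so *split_index
 * becomes 2 and *split_pos becomes 25.  If no non-intersecting
 * neighbours exist at all, the search runs out of deltas and -ENOSPC
 * is returned.
 */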
  1203. static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
  1204. u32 *split_pos, int *split_index)
  1205. {
  1206. int num_used = le16_to_cpu(rl->rl_used);
  1207. int delta, middle = num_used / 2;
  1208. for (delta = 0; delta < middle; delta++) {
  1209. /* Let's check delta earlier than middle */
  1210. if (ocfs2_refcount_rec_no_intersect(
  1211. &rl->rl_recs[middle - delta - 1],
  1212. &rl->rl_recs[middle - delta])) {
  1213. *split_index = middle - delta;
  1214. break;
  1215. }
  1216. /* For even counts, don't walk off the end */
  1217. if ((middle + delta + 1) == num_used)
  1218. continue;
  1219. /* Now try delta past middle */
  1220. if (ocfs2_refcount_rec_no_intersect(
  1221. &rl->rl_recs[middle + delta],
  1222. &rl->rl_recs[middle + delta + 1])) {
  1223. *split_index = middle + delta + 1;
  1224. break;
  1225. }
  1226. }
  1227. if (delta >= middle)
  1228. return -ENOSPC;
  1229. *split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
  1230. return 0;
  1231. }
  1232. static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
  1233. struct buffer_head *new_bh,
  1234. u32 *split_cpos)
  1235. {
  1236. int split_index = 0, num_moved, ret;
  1237. u32 cpos = 0;
  1238. struct ocfs2_refcount_block *rb =
  1239. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1240. struct ocfs2_refcount_list *rl = &rb->rf_records;
  1241. struct ocfs2_refcount_block *new_rb =
  1242. (struct ocfs2_refcount_block *)new_bh->b_data;
  1243. struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;
  1244. mlog(0, "split old leaf refcount block %llu, count = %u, used = %u\n",
  1245. (unsigned long long)ref_leaf_bh->b_blocknr,
1246. le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
  1247. /*
  1248. * XXX: Improvement later.
  1249. * If we know all the high 32 bit cpos is the same, no need to sort.
  1250. *
  1251. * In order to make the whole process safe, we do:
  1252. * 1. sort the entries by their low 32 bit cpos first so that we can
  1253. * find the split cpos easily.
  1254. * 2. call ocfs2_insert_extent to insert the new refcount block.
  1255. * 3. move the refcount rec to the new block.
  1256. * 4. sort the entries by their 64 bit cpos.
  1257. * 5. dirty the new_rb and rb.
  1258. */
  1259. sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
  1260. sizeof(struct ocfs2_refcount_rec),
  1261. cmp_refcount_rec_by_low_cpos, swap_refcount_rec);
  1262. ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
  1263. if (ret) {
  1264. mlog_errno(ret);
  1265. return ret;
  1266. }
  1267. new_rb->rf_cpos = cpu_to_le32(cpos);
  1268. /* move refcount records starting from split_index to the new block. */
  1269. num_moved = le16_to_cpu(rl->rl_used) - split_index;
  1270. memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
  1271. num_moved * sizeof(struct ocfs2_refcount_rec));
1272. /* OK, remove the entries we just moved over to the other block. */
  1273. memset(&rl->rl_recs[split_index], 0,
  1274. num_moved * sizeof(struct ocfs2_refcount_rec));
  1275. /* change old and new rl_used accordingly. */
  1276. le16_add_cpu(&rl->rl_used, -num_moved);
  1277. new_rl->rl_used = cpu_to_le16(num_moved);
  1278. sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
  1279. sizeof(struct ocfs2_refcount_rec),
  1280. cmp_refcount_rec_by_cpos, swap_refcount_rec);
  1281. sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
  1282. sizeof(struct ocfs2_refcount_rec),
  1283. cmp_refcount_rec_by_cpos, swap_refcount_rec);
  1284. *split_cpos = cpos;
  1285. return 0;
  1286. }
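/*
 * Allocate and initialize a brand new leaf refcount block, move the
 * records from split_index onwards out of ref_leaf_bh into it (see
 * ocfs2_divide_leaf_refcount_block above), and insert the new leaf
 * into the refcount b-tree at the chosen split cpos.
 */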
  1287. static int ocfs2_new_leaf_refcount_block(handle_t *handle,
  1288. struct ocfs2_caching_info *ci,
  1289. struct buffer_head *ref_root_bh,
  1290. struct buffer_head *ref_leaf_bh,
  1291. struct ocfs2_alloc_context *meta_ac)
  1292. {
  1293. int ret;
  1294. u16 suballoc_bit_start;
  1295. u32 num_got, new_cpos;
  1296. u64 suballoc_loc, blkno;
  1297. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1298. struct ocfs2_refcount_block *root_rb =
  1299. (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1300. struct buffer_head *new_bh = NULL;
  1301. struct ocfs2_refcount_block *new_rb;
  1302. struct ocfs2_extent_tree ref_et;
  1303. BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));
  1304. ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
  1305. OCFS2_JOURNAL_ACCESS_WRITE);
  1306. if (ret) {
  1307. mlog_errno(ret);
  1308. goto out;
  1309. }
  1310. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1311. OCFS2_JOURNAL_ACCESS_WRITE);
  1312. if (ret) {
  1313. mlog_errno(ret);
  1314. goto out;
  1315. }
  1316. ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
  1317. &suballoc_bit_start, &num_got,
  1318. &blkno);
  1319. if (ret) {
  1320. mlog_errno(ret);
  1321. goto out;
  1322. }
  1323. new_bh = sb_getblk(sb, blkno);
  1324. if (new_bh == NULL) {
  1325. ret = -EIO;
  1326. mlog_errno(ret);
  1327. goto out;
  1328. }
  1329. ocfs2_set_new_buffer_uptodate(ci, new_bh);
  1330. ret = ocfs2_journal_access_rb(handle, ci, new_bh,
  1331. OCFS2_JOURNAL_ACCESS_CREATE);
  1332. if (ret) {
  1333. mlog_errno(ret);
  1334. goto out;
  1335. }
  1336. /* Initialize ocfs2_refcount_block. */
  1337. new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
  1338. memset(new_rb, 0, sb->s_blocksize);
  1339. strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
  1340. new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
  1341. new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
  1342. new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
  1343. new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
  1344. new_rb->rf_blkno = cpu_to_le64(blkno);
  1345. new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
  1346. new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
  1347. new_rb->rf_records.rl_count =
  1348. cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
  1349. new_rb->rf_generation = root_rb->rf_generation;
  1350. ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
  1351. if (ret) {
  1352. mlog_errno(ret);
  1353. goto out;
  1354. }
  1355. ocfs2_journal_dirty(handle, ref_leaf_bh);
  1356. ocfs2_journal_dirty(handle, new_bh);
  1357. ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);
  1358. mlog(0, "insert new leaf block %llu at %u\n",
  1359. (unsigned long long)new_bh->b_blocknr, new_cpos);
  1360. /* Insert the new leaf block with the specific offset cpos. */
  1361. ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
  1362. 1, 0, meta_ac);
  1363. if (ret)
  1364. mlog_errno(ret);
  1365. out:
  1366. brelse(new_bh);
  1367. return ret;
  1368. }
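/*
 * Make room for more refcount records.  If the tree still lives inline
 * in the root block, expand the root into a real b-tree first; then
 * split the (possibly freshly created) leaf block in two.
 */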
  1369. static int ocfs2_expand_refcount_tree(handle_t *handle,
  1370. struct ocfs2_caching_info *ci,
  1371. struct buffer_head *ref_root_bh,
  1372. struct buffer_head *ref_leaf_bh,
  1373. struct ocfs2_alloc_context *meta_ac)
  1374. {
  1375. int ret;
  1376. struct buffer_head *expand_bh = NULL;
  1377. if (ref_root_bh == ref_leaf_bh) {
  1378. /*
  1379. * the old root bh hasn't been expanded to a b-tree,
  1380. * so expand it first.
  1381. */
  1382. ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
  1383. &expand_bh, meta_ac);
  1384. if (ret) {
  1385. mlog_errno(ret);
  1386. goto out;
  1387. }
  1388. } else {
  1389. expand_bh = ref_leaf_bh;
  1390. get_bh(expand_bh);
  1391. }
  1392. /* Now add a new refcount block into the tree.*/
  1393. ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
  1394. expand_bh, meta_ac);
  1395. if (ret)
  1396. mlog_errno(ret);
  1397. out:
  1398. brelse(expand_bh);
  1399. return ret;
  1400. }
  1401. /*
  1402. * Adjust the extent rec in b-tree representing ref_leaf_bh.
  1403. *
  1404. * Only called when we have inserted a new refcount rec at index 0
  1405. * which means ocfs2_extent_rec.e_cpos may need some change.
  1406. */
  1407. static int ocfs2_adjust_refcount_rec(handle_t *handle,
  1408. struct ocfs2_caching_info *ci,
  1409. struct buffer_head *ref_root_bh,
  1410. struct buffer_head *ref_leaf_bh,
  1411. struct ocfs2_refcount_rec *rec)
  1412. {
  1413. int ret = 0, i;
  1414. u32 new_cpos, old_cpos;
  1415. struct ocfs2_path *path = NULL;
  1416. struct ocfs2_extent_tree et;
  1417. struct ocfs2_refcount_block *rb =
  1418. (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1419. struct ocfs2_extent_list *el;
  1420. if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
  1421. goto out;
  1422. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1423. old_cpos = le32_to_cpu(rb->rf_cpos);
  1424. new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
  1425. if (old_cpos <= new_cpos)
  1426. goto out;
  1427. ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
  1428. path = ocfs2_new_path_from_et(&et);
  1429. if (!path) {
  1430. ret = -ENOMEM;
  1431. mlog_errno(ret);
  1432. goto out;
  1433. }
  1434. ret = ocfs2_find_path(ci, path, old_cpos);
  1435. if (ret) {
  1436. mlog_errno(ret);
  1437. goto out;
  1438. }
  1439. /*
  1440. * 2 more credits, one for the leaf refcount block, one for
  1441. * the extent block contains the extent rec.
  1442. */
  1443. ret = ocfs2_extend_trans(handle, 2);
  1444. if (ret < 0) {
  1445. mlog_errno(ret);
  1446. goto out;
  1447. }
  1448. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1449. OCFS2_JOURNAL_ACCESS_WRITE);
  1450. if (ret < 0) {
  1451. mlog_errno(ret);
  1452. goto out;
  1453. }
  1454. ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
  1455. OCFS2_JOURNAL_ACCESS_WRITE);
  1456. if (ret < 0) {
  1457. mlog_errno(ret);
  1458. goto out;
  1459. }
  1460. /* change the leaf extent block first. */
  1461. el = path_leaf_el(path);
  1462. for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
  1463. if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
  1464. break;
  1465. BUG_ON(i == le16_to_cpu(el->l_next_free_rec));
  1466. el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
  1467. /* change the r_cpos in the leaf block. */
  1468. rb->rf_cpos = cpu_to_le32(new_cpos);
  1469. ocfs2_journal_dirty(handle, path_leaf_bh(path));
  1470. ocfs2_journal_dirty(handle, ref_leaf_bh);
  1471. out:
  1472. ocfs2_free_path(path);
  1473. return ret;
  1474. }
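/*
 * Insert a new refcount record at "index" in ref_leaf_bh.  If the leaf
 * is already full, the tree is expanded and the target leaf looked up
 * again.  With "merge" set the new record is coalesced with its
 * neighbours, and an insert at index 0 also fixes up the extent rec's
 * e_cpos via ocfs2_adjust_refcount_rec().
 */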
  1475. static int ocfs2_insert_refcount_rec(handle_t *handle,
  1476. struct ocfs2_caching_info *ci,
  1477. struct buffer_head *ref_root_bh,
  1478. struct buffer_head *ref_leaf_bh,
  1479. struct ocfs2_refcount_rec *rec,
  1480. int index, int merge,
  1481. struct ocfs2_alloc_context *meta_ac)
  1482. {
  1483. int ret;
  1484. struct ocfs2_refcount_block *rb =
  1485. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1486. struct ocfs2_refcount_list *rf_list = &rb->rf_records;
  1487. struct buffer_head *new_bh = NULL;
  1488. BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
  1489. if (rf_list->rl_used == rf_list->rl_count) {
  1490. u64 cpos = le64_to_cpu(rec->r_cpos);
  1491. u32 len = le32_to_cpu(rec->r_clusters);
  1492. ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
  1493. ref_leaf_bh, meta_ac);
  1494. if (ret) {
  1495. mlog_errno(ret);
  1496. goto out;
  1497. }
  1498. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1499. cpos, len, NULL, &index,
  1500. &new_bh);
  1501. if (ret) {
  1502. mlog_errno(ret);
  1503. goto out;
  1504. }
  1505. ref_leaf_bh = new_bh;
  1506. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1507. rf_list = &rb->rf_records;
  1508. }
  1509. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1510. OCFS2_JOURNAL_ACCESS_WRITE);
  1511. if (ret) {
  1512. mlog_errno(ret);
  1513. goto out;
  1514. }
  1515. if (index < le16_to_cpu(rf_list->rl_used))
  1516. memmove(&rf_list->rl_recs[index + 1],
  1517. &rf_list->rl_recs[index],
  1518. (le16_to_cpu(rf_list->rl_used) - index) *
  1519. sizeof(struct ocfs2_refcount_rec));
  1520. mlog(0, "insert refcount record start %llu, len %u, count %u "
  1521. "to leaf block %llu at index %d\n",
  1522. (unsigned long long)le64_to_cpu(rec->r_cpos),
  1523. le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount),
  1524. (unsigned long long)ref_leaf_bh->b_blocknr, index);
  1525. rf_list->rl_recs[index] = *rec;
  1526. le16_add_cpu(&rf_list->rl_used, 1);
  1527. if (merge)
  1528. ocfs2_refcount_rec_merge(rb, index);
  1529. ocfs2_journal_dirty(handle, ref_leaf_bh);
  1530. if (index == 0) {
  1531. ret = ocfs2_adjust_refcount_rec(handle, ci,
  1532. ref_root_bh,
  1533. ref_leaf_bh, rec);
  1534. if (ret)
  1535. mlog_errno(ret);
  1536. }
  1537. out:
  1538. brelse(new_bh);
  1539. return ret;
  1540. }
  1541. /*
  1542. * Split the refcount_rec indexed by "index" in ref_leaf_bh.
1543. * This is much simpler than our b-tree code.
1544. * split_rec is the new refcount rec we want to insert.
1545. * If split_rec->r_refcount > 0, we are changing the refcount (in case we
1546. * increase the refcount or decrease a refcount to non-zero).
1547. * If split_rec->r_refcount == 0, we are punching a hole in the current
1548. * refcount rec (in case we decrease a refcount to zero).
  1549. */
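/*
 * A worked example (hypothetical values): splitting orig_rec
 * (r_cpos 100, r_clusters 10, r_refcount 1) with split_rec
 * (r_cpos 103, r_clusters 4, r_refcount 2) needs recs_need = 2 and
 * leaves three records behind:
 *   (100, 3, refcount 1), (103, 4, refcount 2), (107, 3, refcount 1).
 */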
  1550. static int ocfs2_split_refcount_rec(handle_t *handle,
  1551. struct ocfs2_caching_info *ci,
  1552. struct buffer_head *ref_root_bh,
  1553. struct buffer_head *ref_leaf_bh,
  1554. struct ocfs2_refcount_rec *split_rec,
  1555. int index, int merge,
  1556. struct ocfs2_alloc_context *meta_ac,
  1557. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1558. {
  1559. int ret, recs_need;
  1560. u32 len;
  1561. struct ocfs2_refcount_block *rb =
  1562. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1563. struct ocfs2_refcount_list *rf_list = &rb->rf_records;
  1564. struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
  1565. struct ocfs2_refcount_rec *tail_rec = NULL;
  1566. struct buffer_head *new_bh = NULL;
  1567. BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
  1568. mlog(0, "original r_pos %llu, cluster %u, split %llu, cluster %u\n",
  1569. le64_to_cpu(orig_rec->r_cpos), le32_to_cpu(orig_rec->r_clusters),
  1570. le64_to_cpu(split_rec->r_cpos),
  1571. le32_to_cpu(split_rec->r_clusters));
  1572. /*
  1573. * If we just need to split the header or tail clusters,
1574. * no more recs are needed; a simple split is OK.
1575. * Otherwise we need at least one new rec.
  1576. */
  1577. if (!split_rec->r_refcount &&
  1578. (split_rec->r_cpos == orig_rec->r_cpos ||
  1579. le64_to_cpu(split_rec->r_cpos) +
  1580. le32_to_cpu(split_rec->r_clusters) ==
  1581. le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
  1582. recs_need = 0;
  1583. else
  1584. recs_need = 1;
  1585. /*
1586. * We need one more rec if we split in the middle and the new rec has
  1587. * some refcount in it.
  1588. */
  1589. if (split_rec->r_refcount &&
  1590. (split_rec->r_cpos != orig_rec->r_cpos &&
  1591. le64_to_cpu(split_rec->r_cpos) +
  1592. le32_to_cpu(split_rec->r_clusters) !=
  1593. le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
  1594. recs_need++;
1595. /* If the leaf block doesn't have enough records, expand it. */
  1596. if (le16_to_cpu(rf_list->rl_used) + recs_need >
  1597. le16_to_cpu(rf_list->rl_count)) {
  1598. struct ocfs2_refcount_rec tmp_rec;
  1599. u64 cpos = le64_to_cpu(orig_rec->r_cpos);
  1600. len = le32_to_cpu(orig_rec->r_clusters);
  1601. ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
  1602. ref_leaf_bh, meta_ac);
  1603. if (ret) {
  1604. mlog_errno(ret);
  1605. goto out;
  1606. }
  1607. /*
  1608. * We have to re-get it since now cpos may be moved to
  1609. * another leaf block.
  1610. */
  1611. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1612. cpos, len, &tmp_rec, &index,
  1613. &new_bh);
  1614. if (ret) {
  1615. mlog_errno(ret);
  1616. goto out;
  1617. }
  1618. ref_leaf_bh = new_bh;
  1619. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1620. rf_list = &rb->rf_records;
  1621. orig_rec = &rf_list->rl_recs[index];
  1622. }
  1623. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1624. OCFS2_JOURNAL_ACCESS_WRITE);
  1625. if (ret) {
  1626. mlog_errno(ret);
  1627. goto out;
  1628. }
  1629. /*
1630. * We have calculated how many new records we need and stored it
1631. * in recs_need, so make enough space first by moving the records
  1632. * after "index" to the end.
  1633. */
  1634. if (index != le16_to_cpu(rf_list->rl_used) - 1)
  1635. memmove(&rf_list->rl_recs[index + 1 + recs_need],
  1636. &rf_list->rl_recs[index + 1],
  1637. (le16_to_cpu(rf_list->rl_used) - index - 1) *
  1638. sizeof(struct ocfs2_refcount_rec));
  1639. len = (le64_to_cpu(orig_rec->r_cpos) +
  1640. le32_to_cpu(orig_rec->r_clusters)) -
  1641. (le64_to_cpu(split_rec->r_cpos) +
  1642. le32_to_cpu(split_rec->r_clusters));
  1643. /*
1644. * If we have "len", then we will split in the tail and move it
  1645. * to the end of the space we have just spared.
  1646. */
  1647. if (len) {
  1648. tail_rec = &rf_list->rl_recs[index + recs_need];
  1649. memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
  1650. le64_add_cpu(&tail_rec->r_cpos,
  1651. le32_to_cpu(tail_rec->r_clusters) - len);
  1652. tail_rec->r_clusters = cpu_to_le32(len);
  1653. }
  1654. /*
  1655. * If the split pos isn't the same as the original one, we need to
  1656. * split in the head.
  1657. *
1658. * Note: we may have split_rec.r_refcount = 0, recs_need = 0 and
1659. * len > 0, which means we just cut the head off orig_rec.  In that
1660. * case orig_rec was already modified in place above, so the r_cpos
1661. * check alone would lie; the extra tail_rec != orig_rec test guards it.
  1662. */
  1663. if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
  1664. len = le64_to_cpu(split_rec->r_cpos) -
  1665. le64_to_cpu(orig_rec->r_cpos);
  1666. orig_rec->r_clusters = cpu_to_le32(len);
  1667. index++;
  1668. }
  1669. le16_add_cpu(&rf_list->rl_used, recs_need);
  1670. if (split_rec->r_refcount) {
  1671. rf_list->rl_recs[index] = *split_rec;
  1672. mlog(0, "insert refcount record start %llu, len %u, count %u "
  1673. "to leaf block %llu at index %d\n",
  1674. (unsigned long long)le64_to_cpu(split_rec->r_cpos),
  1675. le32_to_cpu(split_rec->r_clusters),
  1676. le32_to_cpu(split_rec->r_refcount),
  1677. (unsigned long long)ref_leaf_bh->b_blocknr, index);
  1678. if (merge)
  1679. ocfs2_refcount_rec_merge(rb, index);
  1680. }
  1681. ocfs2_journal_dirty(handle, ref_leaf_bh);
  1682. out:
  1683. brelse(new_bh);
  1684. return ret;
  1685. }
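/*
 * Walk the range [cpos, cpos + len) and add 1 to the refcount of every
 * cluster in it, inserting new records for holes and splitting existing
 * records where the range only partially covers them.  "merge" controls
 * whether the resulting records are coalesced with their neighbours.
 */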
  1686. static int __ocfs2_increase_refcount(handle_t *handle,
  1687. struct ocfs2_caching_info *ci,
  1688. struct buffer_head *ref_root_bh,
  1689. u64 cpos, u32 len, int merge,
  1690. struct ocfs2_alloc_context *meta_ac,
  1691. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1692. {
  1693. int ret = 0, index;
  1694. struct buffer_head *ref_leaf_bh = NULL;
  1695. struct ocfs2_refcount_rec rec;
  1696. unsigned int set_len = 0;
  1697. mlog(0, "Tree owner %llu, add refcount start %llu, len %u\n",
  1698. (unsigned long long)ocfs2_metadata_cache_owner(ci),
  1699. (unsigned long long)cpos, len);
  1700. while (len) {
  1701. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1702. cpos, len, &rec, &index,
  1703. &ref_leaf_bh);
  1704. if (ret) {
  1705. mlog_errno(ret);
  1706. goto out;
  1707. }
  1708. set_len = le32_to_cpu(rec.r_clusters);
  1709. /*
1710. * Here we may encounter 3 situations:
  1711. *
  1712. * 1. If we find an already existing record, and the length
  1713. * is the same, cool, we just need to increase the r_refcount
  1714. * and it is OK.
  1715. * 2. If we find a hole, just insert it with r_refcount = 1.
  1716. * 3. If we are in the middle of one extent record, split
  1717. * it.
  1718. */
  1719. if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
  1720. set_len <= len) {
  1721. mlog(0, "increase refcount rec, start %llu, len %u, "
  1722. "count %u\n", (unsigned long long)cpos, set_len,
  1723. le32_to_cpu(rec.r_refcount));
  1724. ret = ocfs2_change_refcount_rec(handle, ci,
  1725. ref_leaf_bh, index,
  1726. merge, 1);
  1727. if (ret) {
  1728. mlog_errno(ret);
  1729. goto out;
  1730. }
  1731. } else if (!rec.r_refcount) {
  1732. rec.r_refcount = cpu_to_le32(1);
  1733. mlog(0, "insert refcount rec, start %llu, len %u\n",
  1734. (unsigned long long)le64_to_cpu(rec.r_cpos),
  1735. set_len);
  1736. ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
  1737. ref_leaf_bh,
  1738. &rec, index,
  1739. merge, meta_ac);
  1740. if (ret) {
  1741. mlog_errno(ret);
  1742. goto out;
  1743. }
  1744. } else {
  1745. set_len = min((u64)(cpos + len),
  1746. le64_to_cpu(rec.r_cpos) + set_len) - cpos;
  1747. rec.r_cpos = cpu_to_le64(cpos);
  1748. rec.r_clusters = cpu_to_le32(set_len);
  1749. le32_add_cpu(&rec.r_refcount, 1);
  1750. mlog(0, "split refcount rec, start %llu, "
  1751. "len %u, count %u\n",
  1752. (unsigned long long)le64_to_cpu(rec.r_cpos),
  1753. set_len, le32_to_cpu(rec.r_refcount));
  1754. ret = ocfs2_split_refcount_rec(handle, ci,
  1755. ref_root_bh, ref_leaf_bh,
  1756. &rec, index, merge,
  1757. meta_ac, dealloc);
  1758. if (ret) {
  1759. mlog_errno(ret);
  1760. goto out;
  1761. }
  1762. }
  1763. cpos += set_len;
  1764. len -= set_len;
  1765. brelse(ref_leaf_bh);
  1766. ref_leaf_bh = NULL;
  1767. }
  1768. out:
  1769. brelse(ref_leaf_bh);
  1770. return ret;
  1771. }
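/*
 * Remove an empty leaf block from the refcount b-tree: delete its extent
 * record, drop it from the metadata cache, queue the block itself for
 * deallocation, and shrink rf_clusters.  If that was the last leaf, the
 * root is reset back to an inline record block.
 */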
  1772. static int ocfs2_remove_refcount_extent(handle_t *handle,
  1773. struct ocfs2_caching_info *ci,
  1774. struct buffer_head *ref_root_bh,
  1775. struct buffer_head *ref_leaf_bh,
  1776. struct ocfs2_alloc_context *meta_ac,
  1777. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1778. {
  1779. int ret;
  1780. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1781. struct ocfs2_refcount_block *rb =
  1782. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1783. struct ocfs2_extent_tree et;
  1784. BUG_ON(rb->rf_records.rl_used);
  1785. ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
  1786. ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
  1787. 1, meta_ac, dealloc);
  1788. if (ret) {
  1789. mlog_errno(ret);
  1790. goto out;
  1791. }
  1792. ocfs2_remove_from_cache(ci, ref_leaf_bh);
  1793. /*
  1794. * add the freed block to the dealloc so that it will be freed
  1795. * when we run dealloc.
  1796. */
  1797. ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
  1798. le16_to_cpu(rb->rf_suballoc_slot),
  1799. le64_to_cpu(rb->rf_suballoc_loc),
  1800. le64_to_cpu(rb->rf_blkno),
  1801. le16_to_cpu(rb->rf_suballoc_bit));
  1802. if (ret) {
  1803. mlog_errno(ret);
  1804. goto out;
  1805. }
  1806. ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
  1807. OCFS2_JOURNAL_ACCESS_WRITE);
  1808. if (ret) {
  1809. mlog_errno(ret);
  1810. goto out;
  1811. }
  1812. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1813. le32_add_cpu(&rb->rf_clusters, -1);
  1814. /*
  1815. * check whether we need to restore the root refcount block if
1816. * there is no leaf extent block at all.
  1817. */
  1818. if (!rb->rf_list.l_next_free_rec) {
  1819. BUG_ON(rb->rf_clusters);
  1820. mlog(0, "reset refcount tree root %llu to be a record block.\n",
  1821. (unsigned long long)ref_root_bh->b_blocknr);
  1822. rb->rf_flags = 0;
  1823. rb->rf_parent = 0;
  1824. rb->rf_cpos = 0;
  1825. memset(&rb->rf_records, 0, sb->s_blocksize -
  1826. offsetof(struct ocfs2_refcount_block, rf_records));
  1827. rb->rf_records.rl_count =
  1828. cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
  1829. }
  1830. ocfs2_journal_dirty(handle, ref_root_bh);
  1831. out:
  1832. return ret;
  1833. }
  1834. int ocfs2_increase_refcount(handle_t *handle,
  1835. struct ocfs2_caching_info *ci,
  1836. struct buffer_head *ref_root_bh,
  1837. u64 cpos, u32 len,
  1838. struct ocfs2_alloc_context *meta_ac,
  1839. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1840. {
  1841. return __ocfs2_increase_refcount(handle, ci, ref_root_bh,
  1842. cpos, len, 1,
  1843. meta_ac, dealloc);
  1844. }
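/*
 * Drop the refcount of [cpos, cpos + len) within the record at "index".
 * If the range covers the whole record we simply decrement it (removing
 * it when it hits zero); otherwise the record is split first.  A leaf
 * that ends up with no records is removed from the tree (unless it is
 * the root block itself).
 */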
  1845. static int ocfs2_decrease_refcount_rec(handle_t *handle,
  1846. struct ocfs2_caching_info *ci,
  1847. struct buffer_head *ref_root_bh,
  1848. struct buffer_head *ref_leaf_bh,
  1849. int index, u64 cpos, unsigned int len,
  1850. struct ocfs2_alloc_context *meta_ac,
  1851. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1852. {
  1853. int ret;
  1854. struct ocfs2_refcount_block *rb =
  1855. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1856. struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];
  1857. BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
  1858. BUG_ON(cpos + len >
  1859. le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));
  1860. if (cpos == le64_to_cpu(rec->r_cpos) &&
  1861. len == le32_to_cpu(rec->r_clusters))
  1862. ret = ocfs2_change_refcount_rec(handle, ci,
  1863. ref_leaf_bh, index, 1, -1);
  1864. else {
  1865. struct ocfs2_refcount_rec split = *rec;
  1866. split.r_cpos = cpu_to_le64(cpos);
  1867. split.r_clusters = cpu_to_le32(len);
  1868. le32_add_cpu(&split.r_refcount, -1);
  1869. mlog(0, "split refcount rec, start %llu, "
  1870. "len %u, count %u, original start %llu, len %u\n",
  1871. (unsigned long long)le64_to_cpu(split.r_cpos),
  1872. len, le32_to_cpu(split.r_refcount),
  1873. (unsigned long long)le64_to_cpu(rec->r_cpos),
  1874. le32_to_cpu(rec->r_clusters));
  1875. ret = ocfs2_split_refcount_rec(handle, ci,
  1876. ref_root_bh, ref_leaf_bh,
  1877. &split, index, 1,
  1878. meta_ac, dealloc);
  1879. }
  1880. if (ret) {
  1881. mlog_errno(ret);
  1882. goto out;
  1883. }
  1884. /* Remove the leaf refcount block if it contains no refcount record. */
  1885. if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
  1886. ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh,
  1887. ref_leaf_bh, meta_ac,
  1888. dealloc);
  1889. if (ret)
  1890. mlog_errno(ret);
  1891. }
  1892. out:
  1893. return ret;
  1894. }
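/*
 * Walk [cpos, cpos + len) decreasing refcounts record by record.  When
 * "delete" is set, clusters whose refcount drops from 1 to 0 are queued
 * in "dealloc" so the space is actually freed later.
 */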
  1895. static int __ocfs2_decrease_refcount(handle_t *handle,
  1896. struct ocfs2_caching_info *ci,
  1897. struct buffer_head *ref_root_bh,
  1898. u64 cpos, u32 len,
  1899. struct ocfs2_alloc_context *meta_ac,
  1900. struct ocfs2_cached_dealloc_ctxt *dealloc,
  1901. int delete)
  1902. {
  1903. int ret = 0, index = 0;
  1904. struct ocfs2_refcount_rec rec;
  1905. unsigned int r_count = 0, r_len;
  1906. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1907. struct buffer_head *ref_leaf_bh = NULL;
  1908. mlog(0, "Tree owner %llu, decrease refcount start %llu, "
  1909. "len %u, delete %u\n",
  1910. (unsigned long long)ocfs2_metadata_cache_owner(ci),
  1911. (unsigned long long)cpos, len, delete);
  1912. while (len) {
  1913. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1914. cpos, len, &rec, &index,
  1915. &ref_leaf_bh);
  1916. if (ret) {
  1917. mlog_errno(ret);
  1918. goto out;
  1919. }
  1920. r_count = le32_to_cpu(rec.r_refcount);
  1921. BUG_ON(r_count == 0);
  1922. if (!delete)
  1923. BUG_ON(r_count > 1);
  1924. r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
  1925. le32_to_cpu(rec.r_clusters)) - cpos;
  1926. ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh,
  1927. ref_leaf_bh, index,
  1928. cpos, r_len,
  1929. meta_ac, dealloc);
  1930. if (ret) {
  1931. mlog_errno(ret);
  1932. goto out;
  1933. }
  1934. if (le32_to_cpu(rec.r_refcount) == 1 && delete) {
  1935. ret = ocfs2_cache_cluster_dealloc(dealloc,
  1936. ocfs2_clusters_to_blocks(sb, cpos),
  1937. r_len);
  1938. if (ret) {
  1939. mlog_errno(ret);
  1940. goto out;
  1941. }
  1942. }
  1943. cpos += r_len;
  1944. len -= r_len;
  1945. brelse(ref_leaf_bh);
  1946. ref_leaf_bh = NULL;
  1947. }
  1948. out:
  1949. brelse(ref_leaf_bh);
  1950. return ret;
  1951. }
  1952. /* Caller must hold refcount tree lock. */
  1953. int ocfs2_decrease_refcount(struct inode *inode,
  1954. handle_t *handle, u32 cpos, u32 len,
  1955. struct ocfs2_alloc_context *meta_ac,
  1956. struct ocfs2_cached_dealloc_ctxt *dealloc,
  1957. int delete)
  1958. {
  1959. int ret;
  1960. u64 ref_blkno;
  1961. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  1962. struct buffer_head *ref_root_bh = NULL;
  1963. struct ocfs2_refcount_tree *tree;
  1964. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  1965. ret = ocfs2_get_refcount_block(inode, &ref_blkno);
  1966. if (ret) {
  1967. mlog_errno(ret);
  1968. goto out;
  1969. }
  1970. ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree);
  1971. if (ret) {
  1972. mlog_errno(ret);
  1973. goto out;
  1974. }
  1975. ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
  1976. &ref_root_bh);
  1977. if (ret) {
  1978. mlog_errno(ret);
  1979. goto out;
  1980. }
  1981. ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh,
  1982. cpos, len, meta_ac, dealloc, delete);
  1983. if (ret)
  1984. mlog_errno(ret);
  1985. out:
  1986. brelse(ref_root_bh);
  1987. return ret;
  1988. }
  1989. /*
  1990. * Mark the already-existing extent at cpos as refcounted for len clusters.
  1991. * This adds the refcount extent flag.
  1992. *
  1993. * If the existing extent is larger than the request, initiate a
  1994. * split. An attempt will be made at merging with adjacent extents.
  1995. *
  1996. * The caller is responsible for passing down meta_ac if we'll need it.
  1997. */
  1998. static int ocfs2_mark_extent_refcounted(struct inode *inode,
  1999. struct ocfs2_extent_tree *et,
  2000. handle_t *handle, u32 cpos,
  2001. u32 len, u32 phys,
  2002. struct ocfs2_alloc_context *meta_ac,
  2003. struct ocfs2_cached_dealloc_ctxt *dealloc)
  2004. {
  2005. int ret;
  2006. mlog(0, "Inode %lu refcount tree cpos %u, len %u, phys cluster %u\n",
  2007. inode->i_ino, cpos, len, phys);
  2008. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2009. ocfs2_error(inode->i_sb, "Inode %lu wants to use refcount "
  2010. "tree, but the feature bit is not set in the "
  2011. "super block.", inode->i_ino);
  2012. ret = -EROFS;
  2013. goto out;
  2014. }
  2015. ret = ocfs2_change_extent_flag(handle, et, cpos,
  2016. len, phys, meta_ac, dealloc,
  2017. OCFS2_EXT_REFCOUNTED, 0);
  2018. if (ret)
  2019. mlog_errno(ret);
  2020. out:
  2021. return ret;
  2022. }
  2023. /*
  2024. * Given some contiguous physical clusters, calculate what we need
  2025. * for modifying their refcount.
  2026. */
  2027. static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
  2028. struct ocfs2_caching_info *ci,
  2029. struct buffer_head *ref_root_bh,
  2030. u64 start_cpos,
  2031. u32 clusters,
  2032. int *meta_add,
  2033. int *credits)
  2034. {
  2035. int ret = 0, index, ref_blocks = 0, recs_add = 0;
  2036. u64 cpos = start_cpos;
  2037. struct ocfs2_refcount_block *rb;
  2038. struct ocfs2_refcount_rec rec;
  2039. struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
  2040. u32 len;
  2041. mlog(0, "start_cpos %llu, clusters %u\n",
  2042. (unsigned long long)start_cpos, clusters);
  2043. while (clusters) {
  2044. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  2045. cpos, clusters, &rec,
  2046. &index, &ref_leaf_bh);
  2047. if (ret) {
  2048. mlog_errno(ret);
  2049. goto out;
  2050. }
  2051. if (ref_leaf_bh != prev_bh) {
  2052. /*
  2053. * Now we encounter a new leaf block, so calculate
  2054. * whether we need to extend the old leaf.
  2055. */
  2056. if (prev_bh) {
  2057. rb = (struct ocfs2_refcount_block *)
  2058. prev_bh->b_data;
2059. if (le16_to_cpu(rb->rf_records.rl_used) +
  2060. recs_add >
  2061. le16_to_cpu(rb->rf_records.rl_count))
  2062. ref_blocks++;
  2063. }
  2064. recs_add = 0;
  2065. *credits += 1;
  2066. brelse(prev_bh);
  2067. prev_bh = ref_leaf_bh;
  2068. get_bh(prev_bh);
  2069. }
  2070. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  2071. mlog(0, "recs_add %d,cpos %llu, clusters %u, rec->r_cpos %llu,"
  2072. "rec->r_clusters %u, rec->r_refcount %u, index %d\n",
  2073. recs_add, (unsigned long long)cpos, clusters,
  2074. (unsigned long long)le64_to_cpu(rec.r_cpos),
  2075. le32_to_cpu(rec.r_clusters),
  2076. le32_to_cpu(rec.r_refcount), index);
  2077. len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
  2078. le32_to_cpu(rec.r_clusters)) - cpos;
  2079. /*
2080. * If the refcount rec already exists, cool. We just need
2081. * to check whether a split is needed; if not, we only bump
2082. * the refcount in place. If we find a hole, we will have to
2083. * insert a new record. Every record we will insert increases recs_add.
2084. *
2085. * We count all the records which will be inserted into the
2086. * same refcount block, so that we can tell exactly whether
2087. * we need a new refcount block or not.
  2088. */
  2089. if (rec.r_refcount) {
  2090. /* Check whether we need a split at the beginning. */
  2091. if (cpos == start_cpos &&
  2092. cpos != le64_to_cpu(rec.r_cpos))
  2093. recs_add++;
  2094. /* Check whether we need a split in the end. */
  2095. if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
  2096. le32_to_cpu(rec.r_clusters))
  2097. recs_add++;
  2098. } else
  2099. recs_add++;
  2100. brelse(ref_leaf_bh);
  2101. ref_leaf_bh = NULL;
  2102. clusters -= len;
  2103. cpos += len;
  2104. }
  2105. if (prev_bh) {
  2106. rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
2107. if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
  2108. le16_to_cpu(rb->rf_records.rl_count))
  2109. ref_blocks++;
  2110. *credits += 1;
  2111. }
  2112. if (!ref_blocks)
  2113. goto out;
  2114. mlog(0, "we need ref_blocks %d\n", ref_blocks);
  2115. *meta_add += ref_blocks;
  2116. *credits += ref_blocks;
  2117. /*
2118. * So we may need to insert ref_blocks new leaf blocks into the tree.
2119. * That also means we need to change the b-tree and add that number
2120. * of extent records, since we never merge them.
2121. * We need one more block for expansion since the newly created leaf
2122. * block is also full and needs a split.
  2123. */
  2124. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  2125. if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
  2126. struct ocfs2_extent_tree et;
  2127. ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
  2128. *meta_add += ocfs2_extend_meta_needed(et.et_root_el);
  2129. *credits += ocfs2_calc_extend_credits(sb,
  2130. et.et_root_el,
  2131. ref_blocks);
  2132. } else {
  2133. *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
  2134. *meta_add += 1;
  2135. }
  2136. out:
  2137. brelse(ref_leaf_bh);
  2138. brelse(prev_bh);
  2139. return ret;
  2140. }
2141. /*
2142. * For a refcount tree, we will decrease the refcount of some
2143. * contiguous clusters, so just go through the tree to see how many
2144. * blocks we are going to touch and whether we need to create new blocks.
2145. *
2146. * Normally the refcount blocks storing these refcounts should be
2147. * contiguous as well, so we can get the number easily.
2148. * We will at most split 2 refcount records and add 2 more
2149. * refcount blocks, so just check it in a rough way.
2150. *
2151. * Caller must hold the refcount tree lock.
2152. */
  2153. int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
  2154. u64 refcount_loc,
  2155. u64 phys_blkno,
  2156. u32 clusters,
  2157. int *credits,
  2158. int *ref_blocks)
  2159. {
  2160. int ret;
  2161. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  2162. struct buffer_head *ref_root_bh = NULL;
  2163. struct ocfs2_refcount_tree *tree;
  2164. u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);
  2165. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2166. ocfs2_error(inode->i_sb, "Inode %lu wants to use refcount "
  2167. "tree, but the feature bit is not set in the "
  2168. "super block.", inode->i_ino);
  2169. ret = -EROFS;
  2170. goto out;
  2171. }
  2172. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  2173. ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
  2174. refcount_loc, &tree);
  2175. if (ret) {
  2176. mlog_errno(ret);
  2177. goto out;
  2178. }
  2179. ret = ocfs2_read_refcount_block(&tree->rf_ci, refcount_loc,
  2180. &ref_root_bh);
  2181. if (ret) {
  2182. mlog_errno(ret);
  2183. goto out;
  2184. }
  2185. ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
  2186. &tree->rf_ci,
  2187. ref_root_bh,
  2188. start_cpos, clusters,
  2189. ref_blocks, credits);
  2190. if (ret) {
  2191. mlog_errno(ret);
  2192. goto out;
  2193. }
  2194. mlog(0, "reserve new metadata %d blocks, credits = %d\n",
  2195. *ref_blocks, *credits);
  2196. out:
  2197. brelse(ref_root_bh);
  2198. return ret;
  2199. }
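/*
 * CoW granularity: we try to carve extents on multiples of
 * MAX_CONTIG_BYTES (1 MB) worth of clusters so that the resulting
 * extent tree still gives reasonably contiguous I/O; the helpers below
 * convert that constant into a cluster count and alignment mask.
 */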
  2200. #define MAX_CONTIG_BYTES 1048576
  2201. static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb)
  2202. {
  2203. return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES);
  2204. }
  2205. static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb)
  2206. {
  2207. return ~(ocfs2_cow_contig_clusters(sb) - 1);
  2208. }
  2209. /*
  2210. * Given an extent that starts at 'start' and an I/O that starts at 'cpos',
  2211. * find an offset (start + (n * contig_clusters)) that is closest to cpos
  2212. * while still being less than or equal to it.
  2213. *
  2214. * The goal is to break the extent at a multiple of contig_clusters.
  2215. */
  2216. static inline unsigned int ocfs2_cow_align_start(struct super_block *sb,
  2217. unsigned int start,
  2218. unsigned int cpos)
  2219. {
  2220. BUG_ON(start > cpos);
  2221. return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
  2222. }
  2223. /*
  2224. * Given a cluster count of len, pad it out so that it is a multiple
  2225. * of contig_clusters.
  2226. */
  2227. static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
  2228. unsigned int len)
  2229. {
  2230. unsigned int padded =
  2231. (len + (ocfs2_cow_contig_clusters(sb) - 1)) &
  2232. ocfs2_cow_contig_mask(sb);
  2233. /* Did we wrap? */
  2234. if (padded < len)
  2235. padded = UINT_MAX;
  2236. return padded;
  2237. }
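/*
 * A quick numeric sketch (assuming 4K clusters, so contig_clusters is
 * 256 and the mask is ~255): ocfs2_cow_align_start(sb, 100, 700) gives
 * 100 + ((700 - 100) & ~255) = 612, the largest start + n * 256 that is
 * still <= 700, and ocfs2_cow_align_length(sb, 600) rounds up to 768.
 */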
  2238. /*
2239. * Calculate the start and number of virtual clusters we need to CoW.
2240. *
2241. * cpos is the virtual start cluster position where we want to do CoW in a
2242. * file and write_len is the cluster length.
2243. * max_cpos is the place where we want to stop CoW intentionally.
2244. *
2245. * Normally we will start CoW from the beginning of the extent record containing cpos.
  2246. * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we
  2247. * get good I/O from the resulting extent tree.
  2248. */
  2249. static int ocfs2_refcount_cal_cow_clusters(struct inode *inode,
  2250. struct ocfs2_extent_list *el,
  2251. u32 cpos,
  2252. u32 write_len,
  2253. u32 max_cpos,
  2254. u32 *cow_start,
  2255. u32 *cow_len)
  2256. {
  2257. int ret = 0;
  2258. int tree_height = le16_to_cpu(el->l_tree_depth), i;
  2259. struct buffer_head *eb_bh = NULL;
  2260. struct ocfs2_extent_block *eb = NULL;
  2261. struct ocfs2_extent_rec *rec;
  2262. unsigned int want_clusters, rec_end = 0;
  2263. int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb);
  2264. int leaf_clusters;
  2265. BUG_ON(cpos + write_len > max_cpos);
  2266. if (tree_height > 0) {
  2267. ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
  2268. if (ret) {
  2269. mlog_errno(ret);
  2270. goto out;
  2271. }
  2272. eb = (struct ocfs2_extent_block *) eb_bh->b_data;
  2273. el = &eb->h_list;
  2274. if (el->l_tree_depth) {
  2275. ocfs2_error(inode->i_sb,
  2276. "Inode %lu has non zero tree depth in "
  2277. "leaf block %llu\n", inode->i_ino,
  2278. (unsigned long long)eb_bh->b_blocknr);
  2279. ret = -EROFS;
  2280. goto out;
  2281. }
  2282. }
  2283. *cow_len = 0;
  2284. for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
  2285. rec = &el->l_recs[i];
  2286. if (ocfs2_is_empty_extent(rec)) {
  2287. mlog_bug_on_msg(i != 0, "Inode %lu has empty record in "
  2288. "index %d\n", inode->i_ino, i);
  2289. continue;
  2290. }
  2291. if (le32_to_cpu(rec->e_cpos) +
  2292. le16_to_cpu(rec->e_leaf_clusters) <= cpos)
  2293. continue;
  2294. if (*cow_len == 0) {
  2295. /*
  2296. * We should find a refcounted record in the
  2297. * first pass.
  2298. */
  2299. BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED));
  2300. *cow_start = le32_to_cpu(rec->e_cpos);
  2301. }
  2302. /*
  2303. * If we encounter a hole, a non-refcounted record or
  2304. * pass the max_cpos, stop the search.
  2305. */
  2306. if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) ||
  2307. (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) ||
  2308. (max_cpos <= le32_to_cpu(rec->e_cpos)))
  2309. break;
  2310. leaf_clusters = le16_to_cpu(rec->e_leaf_clusters);
  2311. rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters;
  2312. if (rec_end > max_cpos) {
  2313. rec_end = max_cpos;
  2314. leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos);
  2315. }
  2316. /*
  2317. * How many clusters do we actually need from
  2318. * this extent? First we see how many we actually
  2319. * need to complete the write. If that's smaller
  2320. * than contig_clusters, we try for contig_clusters.
  2321. */
  2322. if (!*cow_len)
  2323. want_clusters = write_len;
  2324. else
  2325. want_clusters = (cpos + write_len) -
  2326. (*cow_start + *cow_len);
  2327. if (want_clusters < contig_clusters)
  2328. want_clusters = contig_clusters;
  2329. /*
  2330. * If the write does not cover the whole extent, we
  2331. * need to calculate how we're going to split the extent.
  2332. * We try to do it on contig_clusters boundaries.
  2333. *
  2334. * Any extent smaller than contig_clusters will be
  2335. * CoWed in its entirety.
  2336. */
  2337. if (leaf_clusters <= contig_clusters)
  2338. *cow_len += leaf_clusters;
  2339. else if (*cow_len || (*cow_start == cpos)) {
  2340. /*
  2341. * This extent needs to be CoW'd from its
  2342. * beginning, so all we have to do is compute
  2343. * how many clusters to grab. We align
  2344. * want_clusters to the edge of contig_clusters
  2345. * to get better I/O.
  2346. */
  2347. want_clusters = ocfs2_cow_align_length(inode->i_sb,
  2348. want_clusters);
  2349. if (leaf_clusters < want_clusters)
  2350. *cow_len += leaf_clusters;
  2351. else
  2352. *cow_len += want_clusters;
  2353. } else if ((*cow_start + contig_clusters) >=
  2354. (cpos + write_len)) {
  2355. /*
  2356. * Breaking off contig_clusters at the front
  2357. * of the extent will cover our write. That's
  2358. * easy.
  2359. */
  2360. *cow_len = contig_clusters;
  2361. } else if ((rec_end - cpos) <= contig_clusters) {
  2362. /*
  2363. * Breaking off contig_clusters at the tail of
  2364. * this extent will cover cpos.
  2365. */
  2366. *cow_start = rec_end - contig_clusters;
  2367. *cow_len = contig_clusters;
  2368. } else if ((rec_end - cpos) <= want_clusters) {
  2369. /*
  2370. * While we can't fit the entire write in this
  2371. * extent, we know that the write goes from cpos
  2372. * to the end of the extent. Break that off.
  2373. * We try to break it at some multiple of
  2374. * contig_clusters from the front of the extent.
  2375. * Failing that (ie, cpos is within
  2376. * contig_clusters of the front), we'll CoW the
  2377. * entire extent.
  2378. */
  2379. *cow_start = ocfs2_cow_align_start(inode->i_sb,
  2380. *cow_start, cpos);
  2381. *cow_len = rec_end - *cow_start;
  2382. } else {
  2383. /*
  2384. * Ok, the entire write lives in the middle of
  2385. * this extent. Let's try to slice the extent up
  2386. * nicely. Optimally, our CoW region starts at
  2387. * m*contig_clusters from the beginning of the
  2388. * extent and goes for n*contig_clusters,
  2389. * covering the entire write.
  2390. */
  2391. *cow_start = ocfs2_cow_align_start(inode->i_sb,
  2392. *cow_start, cpos);
  2393. want_clusters = (cpos + write_len) - *cow_start;
  2394. want_clusters = ocfs2_cow_align_length(inode->i_sb,
  2395. want_clusters);
  2396. if (*cow_start + want_clusters <= rec_end)
  2397. *cow_len = want_clusters;
  2398. else
  2399. *cow_len = rec_end - *cow_start;
  2400. }
  2401. /* Have we covered our entire write yet? */
  2402. if ((*cow_start + *cow_len) >= (cpos + write_len))
  2403. break;
  2404. /*
  2405. * If we reach the end of the extent block and don't get enough
  2406. * clusters, continue with the next extent block if possible.
  2407. */
  2408. if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
  2409. eb && eb->h_next_leaf_blk) {
  2410. brelse(eb_bh);
  2411. eb_bh = NULL;
  2412. ret = ocfs2_read_extent_block(INODE_CACHE(inode),
  2413. le64_to_cpu(eb->h_next_leaf_blk),
  2414. &eb_bh);
  2415. if (ret) {
  2416. mlog_errno(ret);
  2417. goto out;
  2418. }
  2419. eb = (struct ocfs2_extent_block *) eb_bh->b_data;
  2420. el = &eb->h_list;
  2421. i = -1;
  2422. }
  2423. }
  2424. out:
  2425. brelse(eb_bh);
  2426. return ret;
  2427. }
  2428. /*
2429. * Prepare meta_ac, data_ac and calculate credits when we want to add
2430. * num_clusters clusters to the data tree "et" and change the refcount for the
2431. * old clusters (starting from p_cluster) in the refcount tree.
2432. *
2433. * Note:
2434. * 1. Since we may split the old tree, we will need at most num_clusters + 2
2435. * more new leaf records.
2436. * 2. In some cases we may not need to reserve new clusters (e.g. reflink), so
2437. * just pass data_ac = NULL.
  2438. */
  2439. static int ocfs2_lock_refcount_allocators(struct super_block *sb,
  2440. u32 p_cluster, u32 num_clusters,
  2441. struct ocfs2_extent_tree *et,
  2442. struct ocfs2_caching_info *ref_ci,
  2443. struct buffer_head *ref_root_bh,
  2444. struct ocfs2_alloc_context **meta_ac,
  2445. struct ocfs2_alloc_context **data_ac,
  2446. int *credits)
  2447. {
  2448. int ret = 0, meta_add = 0;
  2449. int num_free_extents = ocfs2_num_free_extents(OCFS2_SB(sb), et);
  2450. if (num_free_extents < 0) {
  2451. ret = num_free_extents;
  2452. mlog_errno(ret);
  2453. goto out;
  2454. }
  2455. if (num_free_extents < num_clusters + 2)
  2456. meta_add =
  2457. ocfs2_extend_meta_needed(et->et_root_el);
  2458. *credits += ocfs2_calc_extend_credits(sb, et->et_root_el,
  2459. num_clusters + 2);
  2460. ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
  2461. p_cluster, num_clusters,
  2462. &meta_add, credits);
  2463. if (ret) {
  2464. mlog_errno(ret);
  2465. goto out;
  2466. }
  2467. mlog(0, "reserve new metadata %d, clusters %u, credits = %d\n",
  2468. meta_add, num_clusters, *credits);
  2469. ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
  2470. meta_ac);
  2471. if (ret) {
  2472. mlog_errno(ret);
  2473. goto out;
  2474. }
  2475. if (data_ac) {
  2476. ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
  2477. data_ac);
  2478. if (ret)
  2479. mlog_errno(ret);
  2480. }
  2481. out:
  2482. if (ret) {
  2483. if (*meta_ac) {
  2484. ocfs2_free_alloc_context(*meta_ac);
  2485. *meta_ac = NULL;
  2486. }
  2487. }
  2488. return ret;
  2489. }
  2490. static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
  2491. {
  2492. BUG_ON(buffer_dirty(bh));
  2493. clear_buffer_mapped(bh);
  2494. return 0;
  2495. }
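/*
 * Copy the data being CoWed through the page cache: for every page that
 * overlaps the cluster range, make sure it is uptodate, clear the stale
 * buffer mappings, then map and dirty it against the newly allocated
 * blocks so the data lands in its new location.
 */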
  2496. static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
  2497. struct ocfs2_cow_context *context,
  2498. u32 cpos, u32 old_cluster,
  2499. u32 new_cluster, u32 new_len)
  2500. {
  2501. int ret = 0, partial;
  2502. struct ocfs2_caching_info *ci = context->data_et.et_ci;
  2503. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  2504. u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
  2505. struct page *page;
  2506. pgoff_t page_index;
  2507. unsigned int from, to;
  2508. loff_t offset, end, map_end;
  2509. struct address_space *mapping = context->inode->i_mapping;
  2510. mlog(0, "old_cluster %u, new %u, len %u at offset %u\n", old_cluster,
  2511. new_cluster, new_len, cpos);
  2512. offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
  2513. end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
  2514. /*
2515. * We only duplicate pages until we reach the page that contains i_size - 1.
  2516. * So trim 'end' to i_size.
  2517. */
  2518. if (end > i_size_read(context->inode))
  2519. end = i_size_read(context->inode);
  2520. while (offset < end) {
  2521. page_index = offset >> PAGE_CACHE_SHIFT;
  2522. map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
  2523. if (map_end > end)
  2524. map_end = end;
  2525. /* from, to is the offset within the page. */
  2526. from = offset & (PAGE_CACHE_SIZE - 1);
  2527. to = PAGE_CACHE_SIZE;
  2528. if (map_end & (PAGE_CACHE_SIZE - 1))
  2529. to = map_end & (PAGE_CACHE_SIZE - 1);
2530. page = grab_cache_page(mapping, page_index);
if (!page) {
ret = -ENOMEM;
mlog_errno(ret);
break;
}
  2531. /*
2532. * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, this page
  2533. * can't be dirtied before we CoW it out.
  2534. */
  2535. if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
  2536. BUG_ON(PageDirty(page));
  2537. if (!PageUptodate(page)) {
  2538. ret = block_read_full_page(page, ocfs2_get_block);
  2539. if (ret) {
  2540. mlog_errno(ret);
  2541. goto unlock;
  2542. }
  2543. lock_page(page);
  2544. }
  2545. if (page_has_buffers(page)) {
  2546. ret = walk_page_buffers(handle, page_buffers(page),
  2547. from, to, &partial,
  2548. ocfs2_clear_cow_buffer);
  2549. if (ret) {
  2550. mlog_errno(ret);
  2551. goto unlock;
  2552. }
  2553. }
  2554. ocfs2_map_and_dirty_page(context->inode,
  2555. handle, from, to,
  2556. page, 0, &new_block);
  2557. mark_page_accessed(page);
  2558. unlock:
  2559. unlock_page(page);
  2560. page_cache_release(page);
  2561. page = NULL;
  2562. offset = map_end;
  2563. if (ret)
  2564. break;
  2565. }
  2566. return ret;
  2567. }
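/*
 * Copy the data being CoWed block by block under journal control: read
 * each old block, copy its contents into a freshly allocated block and
 * dirty it through the journal.  Callers select between this and the
 * page-cache variant above via context->cow_duplicate_clusters.
 */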
  2568. static int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
  2569. struct ocfs2_cow_context *context,
  2570. u32 cpos, u32 old_cluster,
  2571. u32 new_cluster, u32 new_len)
  2572. {
  2573. int ret = 0;
  2574. struct super_block *sb = context->inode->i_sb;
  2575. struct ocfs2_caching_info *ci = context->data_et.et_ci;
  2576. int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
  2577. u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster);
  2578. u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
  2579. struct ocfs2_super *osb = OCFS2_SB(sb);
  2580. struct buffer_head *old_bh = NULL;
  2581. struct buffer_head *new_bh = NULL;
  2582. mlog(0, "old_cluster %u, new %u, len %u\n", old_cluster,
  2583. new_cluster, new_len);
  2584. for (i = 0; i < blocks; i++, old_block++, new_block++) {
  2585. new_bh = sb_getblk(osb->sb, new_block);
  2586. if (new_bh == NULL) {
  2587. ret = -EIO;
  2588. mlog_errno(ret);
  2589. break;
  2590. }
  2591. ocfs2_set_new_buffer_uptodate(ci, new_bh);
  2592. ret = ocfs2_read_block(ci, old_block, &old_bh, NULL);
  2593. if (ret) {
  2594. mlog_errno(ret);
  2595. break;
  2596. }
  2597. ret = ocfs2_journal_access(handle, ci, new_bh,
  2598. OCFS2_JOURNAL_ACCESS_CREATE);
  2599. if (ret) {
  2600. mlog_errno(ret);
  2601. break;
  2602. }
  2603. memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize);
  2604. ocfs2_journal_dirty(handle, new_bh);
  2605. brelse(new_bh);
  2606. brelse(old_bh);
  2607. new_bh = NULL;
  2608. old_bh = NULL;
  2609. }
  2610. brelse(new_bh);
  2611. brelse(old_bh);
  2612. return ret;
  2613. }
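/*
 * Rewrite the extent record covering "cpos" so that it points at
 * p_cluster and no longer carries OCFS2_EXT_REFCOUNTED, splitting the
 * original extent as needed via ocfs2_split_extent().
 */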
  2614. static int ocfs2_clear_ext_refcount(handle_t *handle,
  2615. struct ocfs2_extent_tree *et,
  2616. u32 cpos, u32 p_cluster, u32 len,
  2617. unsigned int ext_flags,
  2618. struct ocfs2_alloc_context *meta_ac,
  2619. struct ocfs2_cached_dealloc_ctxt *dealloc)
  2620. {
  2621. int ret, index;
  2622. struct ocfs2_extent_rec replace_rec;
  2623. struct ocfs2_path *path = NULL;
  2624. struct ocfs2_extent_list *el;
  2625. struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
  2626. u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
  2627. mlog(0, "inode %llu cpos %u, len %u, p_cluster %u, ext_flags %u\n",
  2628. (unsigned long long)ino, cpos, len, p_cluster, ext_flags);
  2629. memset(&replace_rec, 0, sizeof(replace_rec));
  2630. replace_rec.e_cpos = cpu_to_le32(cpos);
  2631. replace_rec.e_leaf_clusters = cpu_to_le16(len);
  2632. replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb,
  2633. p_cluster));
  2634. replace_rec.e_flags = ext_flags;
  2635. replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED;
  2636. path = ocfs2_new_path_from_et(et);
  2637. if (!path) {
  2638. ret = -ENOMEM;
  2639. mlog_errno(ret);
  2640. goto out;
  2641. }
  2642. ret = ocfs2_find_path(et->et_ci, path, cpos);
  2643. if (ret) {
  2644. mlog_errno(ret);
  2645. goto out;
  2646. }
  2647. el = path_leaf_el(path);
  2648. index = ocfs2_search_extent_list(el, cpos);
  2649. if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
  2650. ocfs2_error(sb,
  2651. "Inode %llu has an extent at cpos %u which can no "
  2652. "longer be found.\n",
  2653. (unsigned long long)ino, cpos);
  2654. ret = -EROFS;
  2655. goto out;
  2656. }
  2657. ret = ocfs2_split_extent(handle, et, path, index,
  2658. &replace_rec, meta_ac, dealloc);
  2659. if (ret)
  2660. mlog_errno(ret);
  2661. out:
  2662. ocfs2_free_path(path);
  2663. return ret;
  2664. }
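/*
 * Switch a CoWed range over to its new clusters: duplicate the old data
 * into the new location (unless the extent was unwritten and holds no
 * data), then clear the refcounted flag on the range and point it at
 * the new physical clusters.
 */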
  2665. static int ocfs2_replace_clusters(handle_t *handle,
  2666. struct ocfs2_cow_context *context,
  2667. u32 cpos, u32 old,
  2668. u32 new, u32 len,
  2669. unsigned int ext_flags)
  2670. {
  2671. int ret;
  2672. struct ocfs2_caching_info *ci = context->data_et.et_ci;
  2673. u64 ino = ocfs2_metadata_cache_owner(ci);
  2674. mlog(0, "inode %llu, cpos %u, old %u, new %u, len %u, ext_flags %u\n",
  2675. (unsigned long long)ino, cpos, old, new, len, ext_flags);
2676. /* If the old clusters are unwritten, there is no need to duplicate. */
  2677. if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
  2678. ret = context->cow_duplicate_clusters(handle, context, cpos,
  2679. old, new, len);
  2680. if (ret) {
  2681. mlog_errno(ret);
  2682. goto out;
  2683. }
  2684. }
  2685. ret = ocfs2_clear_ext_refcount(handle, &context->data_et,
  2686. cpos, new, len, ext_flags,
  2687. context->meta_ac, &context->dealloc);
  2688. if (ret)
  2689. mlog_errno(ret);
  2690. out:
  2691. return ret;
  2692. }
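/*
 * In write-back mode, push the pages covering the CoWed range to disk
 * and wait for writeback to finish, so the newly allocated clusters
 * hold up-to-date data. For ordered data mode this is a no-op.
 */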
  2693. static int ocfs2_cow_sync_writeback(struct super_block *sb,
  2694. struct ocfs2_cow_context *context,
  2695. u32 cpos, u32 num_clusters)
  2696. {
  2697. int ret = 0;
  2698. loff_t offset, end, map_end;
  2699. pgoff_t page_index;
  2700. struct page *page;
  2701. if (ocfs2_should_order_data(context->inode))
  2702. return 0;
  2703. offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
  2704. end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);
  2705. ret = filemap_fdatawrite_range(context->inode->i_mapping,
  2706. offset, end - 1);
  2707. if (ret < 0) {
  2708. mlog_errno(ret);
  2709. return ret;
  2710. }
  2711. while (offset < end) {
  2712. page_index = offset >> PAGE_CACHE_SHIFT;
  2713. map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
  2714. if (map_end > end)
  2715. map_end = end;
  2716. page = grab_cache_page(context->inode->i_mapping, page_index);
  2717. BUG_ON(!page);
  2718. wait_on_page_writeback(page);
  2719. if (PageError(page)) {
  2720. ret = -EIO;
  2721. mlog_errno(ret);
  2722. } else
  2723. mark_page_accessed(page);
  2724. unlock_page(page);
  2725. page_cache_release(page);
  2726. page = NULL;
  2727. offset = map_end;
  2728. if (ret)
  2729. break;
  2730. }
  2731. return ret;
  2732. }
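/*
 * get_clusters hook for regular inode data: simply map the virtual
 * cluster through ocfs2_get_clusters().
 */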
  2733. static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context,
  2734. u32 v_cluster, u32 *p_cluster,
  2735. u32 *num_clusters,
  2736. unsigned int *extent_flags)
  2737. {
  2738. return ocfs2_get_clusters(context->inode, v_cluster, p_cluster,
  2739. num_clusters, extent_flags);
  2740. }
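/*
 * CoW one chunk of refcounted clusters: walk the refcount records that
 * cover [p_cluster, p_cluster + num_clusters). If a record's refcount
 * is 1, just clear the refcounted flag; otherwise claim new clusters,
 * copy the data over and decrease the old refcount.
 */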
  2741. static int ocfs2_make_clusters_writable(struct super_block *sb,
  2742. struct ocfs2_cow_context *context,
  2743. u32 cpos, u32 p_cluster,
  2744. u32 num_clusters, unsigned int e_flags)
  2745. {
  2746. int ret, delete, index, credits = 0;
2747. u32 new_bit, new_len, orig_num_clusters = num_clusters;
  2748. unsigned int set_len;
  2749. struct ocfs2_super *osb = OCFS2_SB(sb);
  2750. handle_t *handle;
  2751. struct buffer_head *ref_leaf_bh = NULL;
  2752. struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
  2753. struct ocfs2_refcount_rec rec;
  2754. mlog(0, "cpos %u, p_cluster %u, num_clusters %u, e_flags %u\n",
  2755. cpos, p_cluster, num_clusters, e_flags);
  2756. ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
  2757. &context->data_et,
  2758. ref_ci,
  2759. context->ref_root_bh,
  2760. &context->meta_ac,
  2761. &context->data_ac, &credits);
  2762. if (ret) {
  2763. mlog_errno(ret);
  2764. return ret;
  2765. }
  2766. if (context->post_refcount)
  2767. credits += context->post_refcount->credits;
  2768. credits += context->extra_credits;
  2769. handle = ocfs2_start_trans(osb, credits);
  2770. if (IS_ERR(handle)) {
  2771. ret = PTR_ERR(handle);
  2772. mlog_errno(ret);
  2773. goto out;
  2774. }
  2775. while (num_clusters) {
  2776. ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
  2777. p_cluster, num_clusters,
  2778. &rec, &index, &ref_leaf_bh);
  2779. if (ret) {
  2780. mlog_errno(ret);
  2781. goto out_commit;
  2782. }
  2783. BUG_ON(!rec.r_refcount);
  2784. set_len = min((u64)p_cluster + num_clusters,
  2785. le64_to_cpu(rec.r_cpos) +
  2786. le32_to_cpu(rec.r_clusters)) - p_cluster;
  2787. /*
2788. * There are several different situations here.
2789. * 1. If refcount == 1, remove the flag and don't CoW.
2790. * 2. If refcount > 1, allocate clusters.
2791. * We may not be able to allocate r_len clusters at once, so keep
2792. * going until we have handled num_clusters.
  2793. */
  2794. if (le32_to_cpu(rec.r_refcount) == 1) {
  2795. delete = 0;
  2796. ret = ocfs2_clear_ext_refcount(handle,
  2797. &context->data_et,
  2798. cpos, p_cluster,
  2799. set_len, e_flags,
  2800. context->meta_ac,
  2801. &context->dealloc);
  2802. if (ret) {
  2803. mlog_errno(ret);
  2804. goto out_commit;
  2805. }
  2806. } else {
  2807. delete = 1;
  2808. ret = __ocfs2_claim_clusters(handle,
  2809. context->data_ac,
  2810. 1, set_len,
  2811. &new_bit, &new_len);
  2812. if (ret) {
  2813. mlog_errno(ret);
  2814. goto out_commit;
  2815. }
  2816. ret = ocfs2_replace_clusters(handle, context,
  2817. cpos, p_cluster, new_bit,
  2818. new_len, e_flags);
  2819. if (ret) {
  2820. mlog_errno(ret);
  2821. goto out_commit;
  2822. }
  2823. set_len = new_len;
  2824. }
  2825. ret = __ocfs2_decrease_refcount(handle, ref_ci,
  2826. context->ref_root_bh,
  2827. p_cluster, set_len,
  2828. context->meta_ac,
  2829. &context->dealloc, delete);
  2830. if (ret) {
  2831. mlog_errno(ret);
  2832. goto out_commit;
  2833. }
  2834. cpos += set_len;
  2835. p_cluster += set_len;
  2836. num_clusters -= set_len;
  2837. brelse(ref_leaf_bh);
  2838. ref_leaf_bh = NULL;
  2839. }
  2840. /* handle any post_cow action. */
  2841. if (context->post_refcount && context->post_refcount->func) {
  2842. ret = context->post_refcount->func(context->inode, handle,
  2843. context->post_refcount->para);
  2844. if (ret) {
  2845. mlog_errno(ret);
  2846. goto out_commit;
  2847. }
  2848. }
  2849. /*
  2850. * Here we should write the new page out first if we are
  2851. * in write-back mode.
  2852. */
  2853. if (context->get_clusters == ocfs2_di_get_clusters) {
2854. ret = ocfs2_cow_sync_writeback(sb, context, cpos, orig_num_clusters);
  2855. if (ret)
  2856. mlog_errno(ret);
  2857. }
  2858. out_commit:
  2859. ocfs2_commit_trans(osb, handle);
  2860. out:
  2861. if (context->data_ac) {
  2862. ocfs2_free_alloc_context(context->data_ac);
  2863. context->data_ac = NULL;
  2864. }
  2865. if (context->meta_ac) {
  2866. ocfs2_free_alloc_context(context->meta_ac);
  2867. context->meta_ac = NULL;
  2868. }
  2869. brelse(ref_leaf_bh);
  2870. return ret;
  2871. }
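/*
 * Drive the CoW described by the context: look up each refcounted
 * extent in [cow_start, cow_start + cow_len) and make it writable.
 * See ocfs2_refcount_cow_hunk() and ocfs2_refcount_cow_xattr() for
 * how the context is filled in.
 */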
  2872. static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
  2873. {
  2874. int ret = 0;
  2875. struct inode *inode = context->inode;
  2876. u32 cow_start = context->cow_start, cow_len = context->cow_len;
  2877. u32 p_cluster, num_clusters;
  2878. unsigned int ext_flags;
  2879. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  2880. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2881. ocfs2_error(inode->i_sb, "Inode %lu wants to use a refcount "
  2882. "tree, but the feature bit is not set in the "
  2883. "super block.", inode->i_ino);
  2884. return -EROFS;
  2885. }
  2886. ocfs2_init_dealloc_ctxt(&context->dealloc);
  2887. while (cow_len) {
  2888. ret = context->get_clusters(context, cow_start, &p_cluster,
  2889. &num_clusters, &ext_flags);
  2890. if (ret) {
  2891. mlog_errno(ret);
  2892. break;
  2893. }
  2894. BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED));
  2895. if (cow_len < num_clusters)
  2896. num_clusters = cow_len;
  2897. ret = ocfs2_make_clusters_writable(inode->i_sb, context,
  2898. cow_start, p_cluster,
  2899. num_clusters, ext_flags);
  2900. if (ret) {
  2901. mlog_errno(ret);
  2902. break;
  2903. }
  2904. cow_len -= num_clusters;
  2905. cow_start += num_clusters;
  2906. }
  2907. if (ocfs2_dealloc_has_cluster(&context->dealloc)) {
  2908. ocfs2_schedule_truncate_log_flush(osb, 1);
  2909. ocfs2_run_deallocs(osb, &context->dealloc);
  2910. }
  2911. return ret;
  2912. }
  2913. /*
  2914. * Starting at cpos, try to CoW write_len clusters. Don't CoW
  2915. * past max_cpos. This will stop when it runs into a hole or an
  2916. * unrefcounted extent.
  2917. */
  2918. static int ocfs2_refcount_cow_hunk(struct inode *inode,
  2919. struct buffer_head *di_bh,
  2920. u32 cpos, u32 write_len, u32 max_cpos)
  2921. {
  2922. int ret;
  2923. u32 cow_start = 0, cow_len = 0;
  2924. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  2925. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  2926. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  2927. struct buffer_head *ref_root_bh = NULL;
  2928. struct ocfs2_refcount_tree *ref_tree;
  2929. struct ocfs2_cow_context *context = NULL;
  2930. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  2931. ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list,
  2932. cpos, write_len, max_cpos,
  2933. &cow_start, &cow_len);
  2934. if (ret) {
  2935. mlog_errno(ret);
  2936. goto out;
  2937. }
  2938. mlog(0, "CoW inode %lu, cpos %u, write_len %u, cow_start %u, "
  2939. "cow_len %u\n", inode->i_ino,
  2940. cpos, write_len, cow_start, cow_len);
  2941. BUG_ON(cow_len == 0);
  2942. context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
  2943. if (!context) {
  2944. ret = -ENOMEM;
  2945. mlog_errno(ret);
  2946. goto out;
  2947. }
  2948. ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
  2949. 1, &ref_tree, &ref_root_bh);
  2950. if (ret) {
  2951. mlog_errno(ret);
  2952. goto out;
  2953. }
  2954. context->inode = inode;
  2955. context->cow_start = cow_start;
  2956. context->cow_len = cow_len;
  2957. context->ref_tree = ref_tree;
  2958. context->ref_root_bh = ref_root_bh;
  2959. context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
  2960. context->get_clusters = ocfs2_di_get_clusters;
  2961. ocfs2_init_dinode_extent_tree(&context->data_et,
  2962. INODE_CACHE(inode), di_bh);
  2963. ret = ocfs2_replace_cow(context);
  2964. if (ret)
  2965. mlog_errno(ret);
  2966. /*
2967. * Truncate the extent map here: no matter whether we hit an
2968. * error during the operation, we should not trust the cached
2969. * extent map any more.
  2970. */
  2971. ocfs2_extent_map_trunc(inode, cow_start);
  2972. ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
  2973. brelse(ref_root_bh);
  2974. out:
  2975. kfree(context);
  2976. return ret;
  2977. }
  2978. /*
  2979. * CoW any and all clusters between cpos and cpos+write_len.
  2980. * Don't CoW past max_cpos. If this returns successfully, all
  2981. * clusters between cpos and cpos+write_len are safe to modify.
  2982. */
  2983. int ocfs2_refcount_cow(struct inode *inode,
  2984. struct buffer_head *di_bh,
  2985. u32 cpos, u32 write_len, u32 max_cpos)
  2986. {
  2987. int ret = 0;
  2988. u32 p_cluster, num_clusters;
  2989. unsigned int ext_flags;
  2990. while (write_len) {
  2991. ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
  2992. &num_clusters, &ext_flags);
  2993. if (ret) {
  2994. mlog_errno(ret);
  2995. break;
  2996. }
  2997. if (write_len < num_clusters)
  2998. num_clusters = write_len;
  2999. if (ext_flags & OCFS2_EXT_REFCOUNTED) {
  3000. ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
  3001. num_clusters, max_cpos);
  3002. if (ret) {
  3003. mlog_errno(ret);
  3004. break;
  3005. }
  3006. }
  3007. write_len -= num_clusters;
  3008. cpos += num_clusters;
  3009. }
  3010. return ret;
  3011. }
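/*
 * get_clusters hook for xattr value trees: map the virtual cluster
 * through the xattr value root stored in the context.
 */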
  3012. static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context,
  3013. u32 v_cluster, u32 *p_cluster,
  3014. u32 *num_clusters,
  3015. unsigned int *extent_flags)
  3016. {
  3017. struct inode *inode = context->inode;
  3018. struct ocfs2_xattr_value_root *xv = context->cow_object;
  3019. return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster,
  3020. num_clusters, &xv->xr_list,
  3021. extent_flags);
  3022. }
  3023. /*
3024. * Given an xattr value root, calculate the maximum metadata/credits we need for
  3025. * refcount tree change if we truncate it to 0.
  3026. */
  3027. int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
  3028. struct ocfs2_caching_info *ref_ci,
  3029. struct buffer_head *ref_root_bh,
  3030. struct ocfs2_xattr_value_root *xv,
  3031. int *meta_add, int *credits)
  3032. {
  3033. int ret = 0, index, ref_blocks = 0;
  3034. u32 p_cluster, num_clusters;
  3035. u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters);
  3036. struct ocfs2_refcount_block *rb;
  3037. struct ocfs2_refcount_rec rec;
  3038. struct buffer_head *ref_leaf_bh = NULL;
  3039. while (cpos < clusters) {
  3040. ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
  3041. &num_clusters, &xv->xr_list,
  3042. NULL);
  3043. if (ret) {
  3044. mlog_errno(ret);
  3045. goto out;
  3046. }
  3047. cpos += num_clusters;
  3048. while (num_clusters) {
  3049. ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh,
  3050. p_cluster, num_clusters,
  3051. &rec, &index,
  3052. &ref_leaf_bh);
  3053. if (ret) {
  3054. mlog_errno(ret);
  3055. goto out;
  3056. }
  3057. BUG_ON(!rec.r_refcount);
  3058. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  3059. /*
3060. * We don't really know whether the other clusters are in
3061. * this refcount block or not, so just take the worst case:
3062. * all the clusters are in this block and each one will
3063. * split a refcount rec, so in total we need
3064. * clusters * 2 new refcount recs.
  3065. */
3066. if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
  3067. le16_to_cpu(rb->rf_records.rl_count))
  3068. ref_blocks++;
  3069. *credits += 1;
  3070. brelse(ref_leaf_bh);
  3071. ref_leaf_bh = NULL;
  3072. if (num_clusters <= le32_to_cpu(rec.r_clusters))
  3073. break;
  3074. else
  3075. num_clusters -= le32_to_cpu(rec.r_clusters);
  3076. p_cluster += num_clusters;
  3077. }
  3078. }
  3079. *meta_add += ref_blocks;
  3080. if (!ref_blocks)
  3081. goto out;
  3082. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  3083. if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
  3084. *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
  3085. else {
  3086. struct ocfs2_extent_tree et;
  3087. ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh);
  3088. *credits += ocfs2_calc_extend_credits(inode->i_sb,
  3089. et.et_root_el,
  3090. ref_blocks);
  3091. }
  3092. out:
  3093. brelse(ref_leaf_bh);
  3094. return ret;
  3095. }
  3096. /*
  3097. * Do CoW for xattr.
  3098. */
  3099. int ocfs2_refcount_cow_xattr(struct inode *inode,
  3100. struct ocfs2_dinode *di,
  3101. struct ocfs2_xattr_value_buf *vb,
  3102. struct ocfs2_refcount_tree *ref_tree,
  3103. struct buffer_head *ref_root_bh,
  3104. u32 cpos, u32 write_len,
  3105. struct ocfs2_post_refcount *post)
  3106. {
  3107. int ret;
  3108. struct ocfs2_xattr_value_root *xv = vb->vb_xv;
  3109. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  3110. struct ocfs2_cow_context *context = NULL;
  3111. u32 cow_start, cow_len;
  3112. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  3113. ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list,
  3114. cpos, write_len, UINT_MAX,
  3115. &cow_start, &cow_len);
  3116. if (ret) {
  3117. mlog_errno(ret);
  3118. goto out;
  3119. }
  3120. BUG_ON(cow_len == 0);
  3121. context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
  3122. if (!context) {
  3123. ret = -ENOMEM;
  3124. mlog_errno(ret);
  3125. goto out;
  3126. }
  3127. context->inode = inode;
  3128. context->cow_start = cow_start;
  3129. context->cow_len = cow_len;
  3130. context->ref_tree = ref_tree;
3131. context->ref_root_bh = ref_root_bh;
  3132. context->cow_object = xv;
  3133. context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd;
  3134. /* We need the extra credits for duplicate_clusters by jbd. */
  3135. context->extra_credits =
  3136. ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len;
  3137. context->get_clusters = ocfs2_xattr_value_get_clusters;
  3138. context->post_refcount = post;
  3139. ocfs2_init_xattr_value_extent_tree(&context->data_et,
  3140. INODE_CACHE(inode), vb);
  3141. ret = ocfs2_replace_cow(context);
  3142. if (ret)
  3143. mlog_errno(ret);
  3144. out:
  3145. kfree(context);
  3146. return ret;
  3147. }
  3148. /*
3149. * Insert a new extent into the refcount tree and mark an extent rec
3150. * as refcounted in the dinode tree.
  3151. */
  3152. int ocfs2_add_refcount_flag(struct inode *inode,
  3153. struct ocfs2_extent_tree *data_et,
  3154. struct ocfs2_caching_info *ref_ci,
  3155. struct buffer_head *ref_root_bh,
  3156. u32 cpos, u32 p_cluster, u32 num_clusters,
  3157. struct ocfs2_cached_dealloc_ctxt *dealloc,
  3158. struct ocfs2_post_refcount *post)
  3159. {
  3160. int ret;
  3161. handle_t *handle;
  3162. int credits = 1, ref_blocks = 0;
  3163. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  3164. struct ocfs2_alloc_context *meta_ac = NULL;
  3165. ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
  3166. ref_ci, ref_root_bh,
  3167. p_cluster, num_clusters,
  3168. &ref_blocks, &credits);
  3169. if (ret) {
  3170. mlog_errno(ret);
  3171. goto out;
  3172. }
  3173. mlog(0, "reserve new metadata %d, credits = %d\n",
  3174. ref_blocks, credits);
  3175. if (ref_blocks) {
  3176. ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
  3177. ref_blocks, &meta_ac);
  3178. if (ret) {
  3179. mlog_errno(ret);
  3180. goto out;
  3181. }
  3182. }
  3183. if (post)
  3184. credits += post->credits;
  3185. handle = ocfs2_start_trans(osb, credits);
  3186. if (IS_ERR(handle)) {
  3187. ret = PTR_ERR(handle);
  3188. mlog_errno(ret);
  3189. goto out;
  3190. }
  3191. ret = ocfs2_mark_extent_refcounted(inode, data_et, handle,
  3192. cpos, num_clusters, p_cluster,
  3193. meta_ac, dealloc);
  3194. if (ret) {
  3195. mlog_errno(ret);
  3196. goto out_commit;
  3197. }
  3198. ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
  3199. p_cluster, num_clusters, 0,
  3200. meta_ac, dealloc);
  3201. if (ret) {
  3202. mlog_errno(ret);
  3203. goto out_commit;
  3204. }
  3205. if (post && post->func) {
  3206. ret = post->func(inode, handle, post->para);
  3207. if (ret)
  3208. mlog_errno(ret);
  3209. }
  3210. out_commit:
  3211. ocfs2_commit_trans(osb, handle);
  3212. out:
  3213. if (meta_ac)
  3214. ocfs2_free_alloc_context(meta_ac);
  3215. return ret;
  3216. }
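/*
 * Update the inode's ctime and write the dinode back through the
 * journal. Called from ocfs2_attach_refcount_tree() once the extent
 * records have been marked refcounted.
 */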
  3217. static int ocfs2_change_ctime(struct inode *inode,
  3218. struct buffer_head *di_bh)
  3219. {
  3220. int ret;
  3221. handle_t *handle;
  3222. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  3223. handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
  3224. OCFS2_INODE_UPDATE_CREDITS);
  3225. if (IS_ERR(handle)) {
  3226. ret = PTR_ERR(handle);
  3227. mlog_errno(ret);
  3228. goto out;
  3229. }
  3230. ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
  3231. OCFS2_JOURNAL_ACCESS_WRITE);
  3232. if (ret) {
  3233. mlog_errno(ret);
  3234. goto out_commit;
  3235. }
  3236. inode->i_ctime = CURRENT_TIME;
  3237. di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
  3238. di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
  3239. ocfs2_journal_dirty(handle, di_bh);
  3240. out_commit:
  3241. ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
  3242. out:
  3243. return ret;
  3244. }
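/*
 * Make sure the inode has a refcount tree and mark every allocated
 * data extent (and any xattr extents) as refcounted, so that a
 * subsequent reflink can share the clusters.
 */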
  3245. static int ocfs2_attach_refcount_tree(struct inode *inode,
  3246. struct buffer_head *di_bh)
  3247. {
  3248. int ret, data_changed = 0;
  3249. struct buffer_head *ref_root_bh = NULL;
  3250. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  3251. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  3252. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  3253. struct ocfs2_refcount_tree *ref_tree;
  3254. unsigned int ext_flags;
  3255. loff_t size;
  3256. u32 cpos, num_clusters, clusters, p_cluster;
  3257. struct ocfs2_cached_dealloc_ctxt dealloc;
  3258. struct ocfs2_extent_tree di_et;
  3259. ocfs2_init_dealloc_ctxt(&dealloc);
  3260. if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) {
  3261. ret = ocfs2_create_refcount_tree(inode, di_bh);
  3262. if (ret) {
  3263. mlog_errno(ret);
  3264. goto out;
  3265. }
  3266. }
  3267. BUG_ON(!di->i_refcount_loc);
  3268. ret = ocfs2_lock_refcount_tree(osb,
  3269. le64_to_cpu(di->i_refcount_loc), 1,
  3270. &ref_tree, &ref_root_bh);
  3271. if (ret) {
  3272. mlog_errno(ret);
  3273. goto out;
  3274. }
  3275. if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
  3276. goto attach_xattr;
  3277. ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
  3278. size = i_size_read(inode);
  3279. clusters = ocfs2_clusters_for_bytes(inode->i_sb, size);
  3280. cpos = 0;
  3281. while (cpos < clusters) {
  3282. ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
  3283. &num_clusters, &ext_flags);
  3284. if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) {
  3285. ret = ocfs2_add_refcount_flag(inode, &di_et,
  3286. &ref_tree->rf_ci,
  3287. ref_root_bh, cpos,
  3288. p_cluster, num_clusters,
  3289. &dealloc, NULL);
  3290. if (ret) {
  3291. mlog_errno(ret);
  3292. goto unlock;
  3293. }
  3294. data_changed = 1;
  3295. }
  3296. cpos += num_clusters;
  3297. }
  3298. attach_xattr:
  3299. if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
  3300. ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
  3301. &ref_tree->rf_ci,
  3302. ref_root_bh,
  3303. &dealloc);
  3304. if (ret) {
  3305. mlog_errno(ret);
  3306. goto unlock;
  3307. }
  3308. }
  3309. if (data_changed) {
  3310. ret = ocfs2_change_ctime(inode, di_bh);
  3311. if (ret)
  3312. mlog_errno(ret);
  3313. }
  3314. unlock:
  3315. ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
  3316. brelse(ref_root_bh);
  3317. if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) {
  3318. ocfs2_schedule_truncate_log_flush(osb, 1);
  3319. ocfs2_run_deallocs(osb, &dealloc);
  3320. }
  3321. out:
  3322. /*
  3323. * Empty the extent map so that we may get the right extent
  3324. * record from the disk.
  3325. */
  3326. ocfs2_extent_map_trunc(inode, 0);
  3327. return ret;
  3328. }
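/*
 * Insert an already-refcounted extent into the target inode's extent
 * tree and bump the refcount for its clusters. Used while duplicating
 * the source inode's extent list during reflink.
 */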
  3329. static int ocfs2_add_refcounted_extent(struct inode *inode,
  3330. struct ocfs2_extent_tree *et,
  3331. struct ocfs2_caching_info *ref_ci,
  3332. struct buffer_head *ref_root_bh,
  3333. u32 cpos, u32 p_cluster, u32 num_clusters,
  3334. unsigned int ext_flags,
  3335. struct ocfs2_cached_dealloc_ctxt *dealloc)
  3336. {
  3337. int ret;
  3338. handle_t *handle;
  3339. int credits = 0;
  3340. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  3341. struct ocfs2_alloc_context *meta_ac = NULL;
  3342. ret = ocfs2_lock_refcount_allocators(inode->i_sb,
  3343. p_cluster, num_clusters,
  3344. et, ref_ci,
  3345. ref_root_bh, &meta_ac,
  3346. NULL, &credits);
  3347. if (ret) {
  3348. mlog_errno(ret);
  3349. goto out;
  3350. }
  3351. handle = ocfs2_start_trans(osb, credits);
  3352. if (IS_ERR(handle)) {
  3353. ret = PTR_ERR(handle);
  3354. mlog_errno(ret);
  3355. goto out;
  3356. }
  3357. ret = ocfs2_insert_extent(handle, et, cpos,
  3358. ocfs2_clusters_to_blocks(inode->i_sb, p_cluster),
  3359. num_clusters, ext_flags, meta_ac);
  3360. if (ret) {
  3361. mlog_errno(ret);
  3362. goto out_commit;
  3363. }
  3364. ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
  3365. p_cluster, num_clusters,
  3366. meta_ac, dealloc);
  3367. if (ret)
  3368. mlog_errno(ret);
  3369. out_commit:
  3370. ocfs2_commit_trans(osb, handle);
  3371. out:
  3372. if (meta_ac)
  3373. ocfs2_free_alloc_context(meta_ac);
  3374. return ret;
  3375. }
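/*
 * For inline-data inodes there are no clusters to share; just copy the
 * inline data (and the inline-data flag) from the source dinode to the
 * target dinode under a journal transaction.
 */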
  3376. static int ocfs2_duplicate_inline_data(struct inode *s_inode,
  3377. struct buffer_head *s_bh,
  3378. struct inode *t_inode,
  3379. struct buffer_head *t_bh)
  3380. {
  3381. int ret;
  3382. handle_t *handle;
  3383. struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
  3384. struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
  3385. struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data;
  3386. BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
  3387. handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
  3388. if (IS_ERR(handle)) {
  3389. ret = PTR_ERR(handle);
  3390. mlog_errno(ret);
  3391. goto out;
  3392. }
  3393. ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
  3394. OCFS2_JOURNAL_ACCESS_WRITE);
  3395. if (ret) {
  3396. mlog_errno(ret);
  3397. goto out_commit;
  3398. }
  3399. t_di->id2.i_data.id_count = s_di->id2.i_data.id_count;
  3400. memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data,
  3401. le16_to_cpu(s_di->id2.i_data.id_count));
  3402. spin_lock(&OCFS2_I(t_inode)->ip_lock);
  3403. OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
  3404. t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features);
  3405. spin_unlock(&OCFS2_I(t_inode)->ip_lock);
  3406. ocfs2_journal_dirty(handle, t_bh);
  3407. out_commit:
  3408. ocfs2_commit_trans(osb, handle);
  3409. out:
  3410. return ret;
  3411. }
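/*
 * Walk the source inode's extents and insert matching refcounted
 * extents into the target inode, increasing the refcount for every
 * shared cluster along the way.
 */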
  3412. static int ocfs2_duplicate_extent_list(struct inode *s_inode,
  3413. struct inode *t_inode,
  3414. struct buffer_head *t_bh,
  3415. struct ocfs2_caching_info *ref_ci,
  3416. struct buffer_head *ref_root_bh,
  3417. struct ocfs2_cached_dealloc_ctxt *dealloc)
  3418. {
  3419. int ret = 0;
  3420. u32 p_cluster, num_clusters, clusters, cpos;
  3421. loff_t size;
  3422. unsigned int ext_flags;
  3423. struct ocfs2_extent_tree et;
  3424. ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh);
  3425. size = i_size_read(s_inode);
  3426. clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size);
  3427. cpos = 0;
  3428. while (cpos < clusters) {
  3429. ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
  3430. &num_clusters, &ext_flags);
  3431. if (p_cluster) {
  3432. ret = ocfs2_add_refcounted_extent(t_inode, &et,
  3433. ref_ci, ref_root_bh,
  3434. cpos, p_cluster,
  3435. num_clusters,
  3436. ext_flags,
  3437. dealloc);
  3438. if (ret) {
  3439. mlog_errno(ret);
  3440. goto out;
  3441. }
  3442. }
  3443. cpos += num_clusters;
  3444. }
  3445. out:
  3446. return ret;
  3447. }
  3448. /*
3449. * Change the new file's attributes to match the source file.
3450. *
3451. * reflink creates a snapshot of a file, which means the attributes
3452. * must be identical except for three exceptions: nlink, ino, and ctime.
  3453. */
  3454. static int ocfs2_complete_reflink(struct inode *s_inode,
  3455. struct buffer_head *s_bh,
  3456. struct inode *t_inode,
  3457. struct buffer_head *t_bh,
  3458. bool preserve)
  3459. {
  3460. int ret;
  3461. handle_t *handle;
  3462. struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
  3463. struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data;
  3464. loff_t size = i_size_read(s_inode);
  3465. handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb),
  3466. OCFS2_INODE_UPDATE_CREDITS);
  3467. if (IS_ERR(handle)) {
  3468. ret = PTR_ERR(handle);
  3469. mlog_errno(ret);
  3470. return ret;
  3471. }
  3472. ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
  3473. OCFS2_JOURNAL_ACCESS_WRITE);
  3474. if (ret) {
  3475. mlog_errno(ret);
  3476. goto out_commit;
  3477. }
  3478. spin_lock(&OCFS2_I(t_inode)->ip_lock);
  3479. OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters;
  3480. OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr;
  3481. OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features;
  3482. spin_unlock(&OCFS2_I(t_inode)->ip_lock);
  3483. i_size_write(t_inode, size);
  3484. t_inode->i_blocks = s_inode->i_blocks;
  3485. di->i_xattr_inline_size = s_di->i_xattr_inline_size;
  3486. di->i_clusters = s_di->i_clusters;
  3487. di->i_size = s_di->i_size;
  3488. di->i_dyn_features = s_di->i_dyn_features;
  3489. di->i_attr = s_di->i_attr;
  3490. if (preserve) {
  3491. t_inode->i_uid = s_inode->i_uid;
  3492. t_inode->i_gid = s_inode->i_gid;
  3493. t_inode->i_mode = s_inode->i_mode;
  3494. di->i_uid = s_di->i_uid;
  3495. di->i_gid = s_di->i_gid;
  3496. di->i_mode = s_di->i_mode;
  3497. /*
3498. * Update times.
3499. * We want mtime to appear identical to the source and
3500. * ctime to be updated.
  3501. */
  3502. t_inode->i_ctime = CURRENT_TIME;
  3503. di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec);
  3504. di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec);
  3505. t_inode->i_mtime = s_inode->i_mtime;
  3506. di->i_mtime = s_di->i_mtime;
  3507. di->i_mtime_nsec = s_di->i_mtime_nsec;
  3508. }
  3509. ocfs2_journal_dirty(handle, t_bh);
  3510. out_commit:
  3511. ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle);
  3512. return ret;
  3513. }
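/*
 * Point the target inode at the source's refcount tree and duplicate
 * either the inline data or the extent list, so both inodes share the
 * same clusters.
 */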
  3514. static int ocfs2_create_reflink_node(struct inode *s_inode,
  3515. struct buffer_head *s_bh,
  3516. struct inode *t_inode,
  3517. struct buffer_head *t_bh,
  3518. bool preserve)
  3519. {
  3520. int ret;
  3521. struct buffer_head *ref_root_bh = NULL;
  3522. struct ocfs2_cached_dealloc_ctxt dealloc;
  3523. struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
  3524. struct ocfs2_refcount_block *rb;
  3525. struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
  3526. struct ocfs2_refcount_tree *ref_tree;
  3527. ocfs2_init_dealloc_ctxt(&dealloc);
  3528. ret = ocfs2_set_refcount_tree(t_inode, t_bh,
  3529. le64_to_cpu(di->i_refcount_loc));
  3530. if (ret) {
  3531. mlog_errno(ret);
  3532. goto out;
  3533. }
  3534. if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
  3535. ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
  3536. t_inode, t_bh);
  3537. if (ret)
  3538. mlog_errno(ret);
  3539. goto out;
  3540. }
  3541. ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
  3542. 1, &ref_tree, &ref_root_bh);
  3543. if (ret) {
  3544. mlog_errno(ret);
  3545. goto out;
  3546. }
  3547. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  3548. ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
  3549. &ref_tree->rf_ci, ref_root_bh,
  3550. &dealloc);
  3551. if (ret) {
  3552. mlog_errno(ret);
  3553. goto out_unlock_refcount;
  3554. }
  3555. out_unlock_refcount:
  3556. ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
  3557. brelse(ref_root_bh);
  3558. out:
  3559. if (ocfs2_dealloc_has_cluster(&dealloc)) {
  3560. ocfs2_schedule_truncate_log_flush(osb, 1);
  3561. ocfs2_run_deallocs(osb, &dealloc);
  3562. }
  3563. return ret;
  3564. }
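/*
 * Core of the reflink operation: flush the source's dirty data, attach
 * a refcount tree to it, then build the shared target inode (data,
 * xattrs and inode attributes) under the target's cluster lock.
 */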
  3565. static int __ocfs2_reflink(struct dentry *old_dentry,
  3566. struct buffer_head *old_bh,
  3567. struct inode *new_inode,
  3568. bool preserve)
  3569. {
  3570. int ret;
  3571. struct inode *inode = old_dentry->d_inode;
  3572. struct buffer_head *new_bh = NULL;
  3573. if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
  3574. ret = -EINVAL;
  3575. mlog_errno(ret);
  3576. goto out;
  3577. }
  3578. ret = filemap_fdatawrite(inode->i_mapping);
  3579. if (ret) {
  3580. mlog_errno(ret);
  3581. goto out;
  3582. }
  3583. ret = ocfs2_attach_refcount_tree(inode, old_bh);
  3584. if (ret) {
  3585. mlog_errno(ret);
  3586. goto out;
  3587. }
  3588. mutex_lock(&new_inode->i_mutex);
  3589. ret = ocfs2_inode_lock(new_inode, &new_bh, 1);
  3590. if (ret) {
  3591. mlog_errno(ret);
  3592. goto out_unlock;
  3593. }
  3594. ret = ocfs2_create_reflink_node(inode, old_bh,
  3595. new_inode, new_bh, preserve);
  3596. if (ret) {
  3597. mlog_errno(ret);
  3598. goto inode_unlock;
  3599. }
  3600. if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
  3601. ret = ocfs2_reflink_xattrs(inode, old_bh,
  3602. new_inode, new_bh,
  3603. preserve);
  3604. if (ret) {
  3605. mlog_errno(ret);
  3606. goto inode_unlock;
  3607. }
  3608. }
  3609. ret = ocfs2_complete_reflink(inode, old_bh,
  3610. new_inode, new_bh, preserve);
  3611. if (ret)
  3612. mlog_errno(ret);
  3613. inode_unlock:
  3614. ocfs2_inode_unlock(new_inode, 1);
  3615. brelse(new_bh);
  3616. out_unlock:
  3617. mutex_unlock(&new_inode->i_mutex);
  3618. out:
  3619. if (!ret) {
  3620. ret = filemap_fdatawait(inode->i_mapping);
  3621. if (ret)
  3622. mlog_errno(ret);
  3623. }
  3624. return ret;
  3625. }
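/*
 * Create the reflink target as an orphan inode in dir, snapshot the
 * source into it, then move it to new_dentry. If the caller does not
 * preserve attributes, fresh security attributes and ACLs are applied.
 */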
  3626. static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
  3627. struct dentry *new_dentry, bool preserve)
  3628. {
  3629. int error;
  3630. struct inode *inode = old_dentry->d_inode;
  3631. struct buffer_head *old_bh = NULL;
  3632. struct inode *new_orphan_inode = NULL;
  3633. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
  3634. return -EOPNOTSUPP;
  3635. error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
  3636. &new_orphan_inode);
  3637. if (error) {
  3638. mlog_errno(error);
  3639. goto out;
  3640. }
  3641. error = ocfs2_inode_lock(inode, &old_bh, 1);
  3642. if (error) {
  3643. mlog_errno(error);
  3644. goto out;
  3645. }
  3646. down_write(&OCFS2_I(inode)->ip_xattr_sem);
  3647. down_write(&OCFS2_I(inode)->ip_alloc_sem);
  3648. error = __ocfs2_reflink(old_dentry, old_bh,
  3649. new_orphan_inode, preserve);
  3650. up_write(&OCFS2_I(inode)->ip_alloc_sem);
  3651. up_write(&OCFS2_I(inode)->ip_xattr_sem);
  3652. ocfs2_inode_unlock(inode, 1);
  3653. brelse(old_bh);
  3654. if (error) {
  3655. mlog_errno(error);
  3656. goto out;
  3657. }
3658. /* If security attributes aren't preserved, we need to re-initialize them. */
  3659. if (!preserve) {
  3660. error = ocfs2_init_security_and_acl(dir, new_orphan_inode);
  3661. if (error)
  3662. mlog_errno(error);
  3663. }
  3664. out:
  3665. if (!error) {
  3666. error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
  3667. new_dentry);
  3668. if (error)
  3669. mlog_errno(error);
  3670. }
  3671. if (new_orphan_inode) {
  3672. /*
  3673. * We need to open_unlock the inode no matter whether we
  3674. * succeed or not, so that other nodes can delete it later.
  3675. */
  3676. ocfs2_open_unlock(new_orphan_inode);
  3677. if (error)
  3678. iput(new_orphan_inode);
  3679. }
  3680. return error;
  3681. }
  3682. /*
  3683. * Below here are the bits used by OCFS2_IOC_REFLINK() to fake
  3684. * sys_reflink(). This will go away when vfs_reflink() exists in
  3685. * fs/namei.c.
  3686. */
  3687. /* copied from may_create in VFS. */
  3688. static inline int ocfs2_may_create(struct inode *dir, struct dentry *child)
  3689. {
  3690. if (child->d_inode)
  3691. return -EEXIST;
  3692. if (IS_DEADDIR(dir))
  3693. return -ENOENT;
  3694. return inode_permission(dir, MAY_WRITE | MAY_EXEC);
  3695. }
  3696. /* copied from user_path_parent. */
  3697. static int ocfs2_user_path_parent(const char __user *path,
  3698. struct nameidata *nd, char **name)
  3699. {
  3700. char *s = getname(path);
  3701. int error;
  3702. if (IS_ERR(s))
  3703. return PTR_ERR(s);
  3704. error = path_lookup(s, LOOKUP_PARENT, nd);
  3705. if (error)
  3706. putname(s);
  3707. else
  3708. *name = s;
  3709. return error;
  3710. }
  3711. /**
  3712. * ocfs2_vfs_reflink - Create a reference-counted link
  3713. *
  3714. * @old_dentry: source dentry + inode
  3715. * @dir: directory to create the target
  3716. * @new_dentry: target dentry
  3717. * @preserve: if true, preserve all file attributes
  3718. */
  3719. static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
  3720. struct dentry *new_dentry, bool preserve)
  3721. {
  3722. struct inode *inode = old_dentry->d_inode;
  3723. int error;
  3724. if (!inode)
  3725. return -ENOENT;
  3726. error = ocfs2_may_create(dir, new_dentry);
  3727. if (error)
  3728. return error;
  3729. if (dir->i_sb != inode->i_sb)
  3730. return -EXDEV;
  3731. /*
  3732. * A reflink to an append-only or immutable file cannot be created.
  3733. */
  3734. if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
  3735. return -EPERM;
  3736. /* Only regular files can be reflinked. */
  3737. if (!S_ISREG(inode->i_mode))
  3738. return -EPERM;
  3739. /*
3740. * If the caller wants to preserve ownership, they must have the
3741. * rights to do so.
  3742. */
  3743. if (preserve) {
  3744. if ((current_fsuid() != inode->i_uid) && !capable(CAP_CHOWN))
  3745. return -EPERM;
  3746. if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN))
  3747. return -EPERM;
  3748. }
  3749. /*
  3750. * If the caller is modifying any aspect of the attributes, they
  3751. * are not creating a snapshot. They need read permission on the
  3752. * file.
  3753. */
  3754. if (!preserve) {
  3755. error = inode_permission(inode, MAY_READ);
  3756. if (error)
  3757. return error;
  3758. }
  3759. mutex_lock(&inode->i_mutex);
  3760. dquot_initialize(dir);
  3761. error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
  3762. mutex_unlock(&inode->i_mutex);
  3763. if (!error)
  3764. fsnotify_create(dir, new_dentry);
  3765. return error;
  3766. }
  3767. /*
3768. * Most of this code is copied from sys_linkat.
  3769. */
  3770. int ocfs2_reflink_ioctl(struct inode *inode,
  3771. const char __user *oldname,
  3772. const char __user *newname,
  3773. bool preserve)
  3774. {
  3775. struct dentry *new_dentry;
  3776. struct nameidata nd;
  3777. struct path old_path;
  3778. int error;
  3779. char *to = NULL;
  3780. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
  3781. return -EOPNOTSUPP;
  3782. error = user_path_at(AT_FDCWD, oldname, 0, &old_path);
  3783. if (error) {
  3784. mlog_errno(error);
  3785. return error;
  3786. }
  3787. error = ocfs2_user_path_parent(newname, &nd, &to);
  3788. if (error) {
  3789. mlog_errno(error);
  3790. goto out;
  3791. }
  3792. error = -EXDEV;
  3793. if (old_path.mnt != nd.path.mnt)
  3794. goto out_release;
  3795. new_dentry = lookup_create(&nd, 0);
  3796. error = PTR_ERR(new_dentry);
  3797. if (IS_ERR(new_dentry)) {
  3798. mlog_errno(error);
  3799. goto out_unlock;
  3800. }
  3801. error = mnt_want_write(nd.path.mnt);
  3802. if (error) {
  3803. mlog_errno(error);
  3804. goto out_dput;
  3805. }
  3806. error = ocfs2_vfs_reflink(old_path.dentry,
  3807. nd.path.dentry->d_inode,
  3808. new_dentry, preserve);
  3809. mnt_drop_write(nd.path.mnt);
  3810. out_dput:
  3811. dput(new_dentry);
  3812. out_unlock:
  3813. mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
  3814. out_release:
  3815. path_put(&nd.path);
  3816. putname(to);
  3817. out:
  3818. path_put(&old_path);
  3819. return error;
  3820. }