refcounttree.c
  1. /* -*- mode: c; c-basic-offset: 8; -*-
  2. * vim: noexpandtab sw=8 ts=8 sts=0:
  3. *
  4. * refcounttree.c
  5. *
  6. * Copyright (C) 2009 Oracle. All rights reserved.
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public
  10. * License version 2 as published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. */
  17. #include <linux/sort.h>
  18. #define MLOG_MASK_PREFIX ML_REFCOUNT
  19. #include <cluster/masklog.h>
  20. #include "ocfs2.h"
  21. #include "inode.h"
  22. #include "alloc.h"
  23. #include "suballoc.h"
  24. #include "journal.h"
  25. #include "uptodate.h"
  26. #include "super.h"
  27. #include "buffer_head_io.h"
  28. #include "blockcheck.h"
  29. #include "refcounttree.h"
  30. #include "sysfile.h"
  31. #include "dlmglue.h"
  32. #include "extent_map.h"
  33. #include "aops.h"
  34. #include "xattr.h"
  35. #include <linux/bio.h>
  36. #include <linux/blkdev.h>
  37. #include <linux/gfp.h>
  38. #include <linux/slab.h>
  39. #include <linux/writeback.h>
  40. #include <linux/pagevec.h>
  41. #include <linux/swap.h>
  42. struct ocfs2_cow_context {
  43. struct inode *inode;
  44. u32 cow_start;
  45. u32 cow_len;
  46. struct ocfs2_extent_tree data_et;
  47. struct ocfs2_refcount_tree *ref_tree;
  48. struct buffer_head *ref_root_bh;
  49. struct ocfs2_alloc_context *meta_ac;
  50. struct ocfs2_alloc_context *data_ac;
  51. struct ocfs2_cached_dealloc_ctxt dealloc;
  52. void *cow_object;
  53. struct ocfs2_post_refcount *post_refcount;
  54. int extra_credits;
  55. int (*get_clusters)(struct ocfs2_cow_context *context,
  56. u32 v_cluster, u32 *p_cluster,
  57. u32 *num_clusters,
  58. unsigned int *extent_flags);
  59. int (*cow_duplicate_clusters)(handle_t *handle,
  60. struct ocfs2_cow_context *context,
  61. u32 cpos, u32 old_cluster,
  62. u32 new_cluster, u32 new_len);
  63. };
  64. static inline struct ocfs2_refcount_tree *
  65. cache_info_to_refcount(struct ocfs2_caching_info *ci)
  66. {
  67. return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
  68. }
  69. static int ocfs2_validate_refcount_block(struct super_block *sb,
  70. struct buffer_head *bh)
  71. {
  72. int rc;
  73. struct ocfs2_refcount_block *rb =
  74. (struct ocfs2_refcount_block *)bh->b_data;
  75. mlog(0, "Validating refcount block %llu\n",
  76. (unsigned long long)bh->b_blocknr);
  77. BUG_ON(!buffer_uptodate(bh));
  78. /*
  79. * If the ecc fails, we return the error but otherwise
  80. * leave the filesystem running. We know any error is
  81. * local to this block.
  82. */
  83. rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
  84. if (rc) {
  85. mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
  86. (unsigned long long)bh->b_blocknr);
  87. return rc;
  88. }
  89. if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
  90. ocfs2_error(sb,
  91. "Refcount block #%llu has bad signature %.*s",
  92. (unsigned long long)bh->b_blocknr, 7,
  93. rb->rf_signature);
  94. return -EINVAL;
  95. }
  96. if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
  97. ocfs2_error(sb,
  98. "Refcount block #%llu has an invalid rf_blkno "
  99. "of %llu",
  100. (unsigned long long)bh->b_blocknr,
  101. (unsigned long long)le64_to_cpu(rb->rf_blkno));
  102. return -EINVAL;
  103. }
  104. if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
  105. ocfs2_error(sb,
  106. "Refcount block #%llu has an invalid "
  107. "rf_fs_generation of #%u",
  108. (unsigned long long)bh->b_blocknr,
  109. le32_to_cpu(rb->rf_fs_generation));
  110. return -EINVAL;
  111. }
  112. return 0;
  113. }
  114. static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
  115. u64 rb_blkno,
  116. struct buffer_head **bh)
  117. {
  118. int rc;
  119. struct buffer_head *tmp = *bh;
  120. rc = ocfs2_read_block(ci, rb_blkno, &tmp,
  121. ocfs2_validate_refcount_block);
  122. /* If ocfs2_read_block() got us a new bh, pass it up. */
  123. if (!rc && !*bh)
  124. *bh = tmp;
  125. return rc;
  126. }
  127. static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
  128. {
  129. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  130. return rf->rf_blkno;
  131. }
  132. static struct super_block *
  133. ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
  134. {
  135. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  136. return rf->rf_sb;
  137. }
  138. static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
  139. {
  140. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  141. spin_lock(&rf->rf_lock);
  142. }
  143. static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
  144. {
  145. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  146. spin_unlock(&rf->rf_lock);
  147. }
  148. static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
  149. {
  150. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  151. mutex_lock(&rf->rf_io_mutex);
  152. }
  153. static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
  154. {
  155. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  156. mutex_unlock(&rf->rf_io_mutex);
  157. }
  158. static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
  159. .co_owner = ocfs2_refcount_cache_owner,
  160. .co_get_super = ocfs2_refcount_cache_get_super,
  161. .co_cache_lock = ocfs2_refcount_cache_lock,
  162. .co_cache_unlock = ocfs2_refcount_cache_unlock,
  163. .co_io_lock = ocfs2_refcount_cache_io_lock,
  164. .co_io_unlock = ocfs2_refcount_cache_io_unlock,
  165. };
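/*
 * Find the in-memory refcount tree for blkno in the per-super
 * rb-tree of trees.  The caller must hold osb_lock.
 */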
  166. static struct ocfs2_refcount_tree *
  167. ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
  168. {
  169. struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
  170. struct ocfs2_refcount_tree *tree = NULL;
  171. while (n) {
  172. tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);
  173. if (blkno < tree->rf_blkno)
  174. n = n->rb_left;
  175. else if (blkno > tree->rf_blkno)
  176. n = n->rb_right;
  177. else
  178. return tree;
  179. }
  180. return NULL;
  181. }
  182. /* osb_lock is already locked. */
  183. static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
  184. struct ocfs2_refcount_tree *new)
  185. {
  186. u64 rf_blkno = new->rf_blkno;
  187. struct rb_node *parent = NULL;
  188. struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
  189. struct ocfs2_refcount_tree *tmp;
  190. while (*p) {
  191. parent = *p;
  192. tmp = rb_entry(parent, struct ocfs2_refcount_tree,
  193. rf_node);
  194. if (rf_blkno < tmp->rf_blkno)
  195. p = &(*p)->rb_left;
  196. else if (rf_blkno > tmp->rf_blkno)
  197. p = &(*p)->rb_right;
  198. else {
  199. /* This should never happen! */
  200. mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
  201. (unsigned long long)rf_blkno);
  202. BUG();
  203. }
  204. }
  205. rb_link_node(&new->rf_node, parent, p);
  206. rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
  207. }
  208. static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
  209. {
  210. ocfs2_metadata_cache_exit(&tree->rf_ci);
  211. ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
  212. ocfs2_lock_res_free(&tree->rf_lockres);
  213. kfree(tree);
  214. }
  215. static inline void
  216. ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
  217. struct ocfs2_refcount_tree *tree)
  218. {
  219. rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
  220. if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
  221. osb->osb_ref_tree_lru = NULL;
  222. }
  223. static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
  224. struct ocfs2_refcount_tree *tree)
  225. {
  226. spin_lock(&osb->osb_lock);
  227. ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
  228. spin_unlock(&osb->osb_lock);
  229. }
  230. void ocfs2_kref_remove_refcount_tree(struct kref *kref)
  231. {
  232. struct ocfs2_refcount_tree *tree =
  233. container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);
  234. ocfs2_free_refcount_tree(tree);
  235. }
  236. static inline void
  237. ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
  238. {
  239. kref_get(&tree->rf_getcnt);
  240. }
  241. static inline void
  242. ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
  243. {
  244. kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
  245. }
  246. static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
  247. struct super_block *sb)
  248. {
  249. ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
  250. mutex_init(&new->rf_io_mutex);
  251. new->rf_sb = sb;
  252. spin_lock_init(&new->rf_lock);
  253. }
  254. static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
  255. struct ocfs2_refcount_tree *new,
  256. u64 rf_blkno, u32 generation)
  257. {
  258. init_rwsem(&new->rf_sem);
  259. ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
  260. rf_blkno, generation);
  261. }
  262. static struct ocfs2_refcount_tree*
  263. ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
  264. {
  265. struct ocfs2_refcount_tree *new;
  266. new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
  267. if (!new)
  268. return NULL;
  269. new->rf_blkno = rf_blkno;
  270. kref_init(&new->rf_getcnt);
  271. ocfs2_init_refcount_tree_ci(new, osb->sb);
  272. return new;
  273. }
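/*
 * Look up the cached ocfs2_refcount_tree for rf_blkno, checking the
 * single-entry LRU pointer first.  If it isn't cached yet, allocate a
 * new one, read its root block to pick up the generation, and insert
 * it into osb->osb_rf_lock_tree.
 */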
  274. static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
  275. struct ocfs2_refcount_tree **ret_tree)
  276. {
  277. int ret = 0;
  278. struct ocfs2_refcount_tree *tree, *new = NULL;
  279. struct buffer_head *ref_root_bh = NULL;
  280. struct ocfs2_refcount_block *ref_rb;
  281. spin_lock(&osb->osb_lock);
  282. if (osb->osb_ref_tree_lru &&
  283. osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
  284. tree = osb->osb_ref_tree_lru;
  285. else
  286. tree = ocfs2_find_refcount_tree(osb, rf_blkno);
  287. if (tree)
  288. goto out;
  289. spin_unlock(&osb->osb_lock);
  290. new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
  291. if (!new) {
  292. ret = -ENOMEM;
  293. mlog_errno(ret);
  294. return ret;
  295. }
  296. /*
  297. * We need the generation to create the refcount tree lock, and since
  298. * it doesn't change while the tree is modified, we can safely read it
  299. * here without protection.
  300. * We also have to purge the cache after we create the lock, since the
  301. * refcount block may contain stale data. It can only be trusted once
  302. * we hold the refcount lock.
  303. */
  304. ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
  305. if (ret) {
  306. mlog_errno(ret);
  307. ocfs2_metadata_cache_exit(&new->rf_ci);
  308. kfree(new);
  309. return ret;
  310. }
  311. ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  312. new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
  313. ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
  314. new->rf_generation);
  315. ocfs2_metadata_cache_purge(&new->rf_ci);
  316. spin_lock(&osb->osb_lock);
  317. tree = ocfs2_find_refcount_tree(osb, rf_blkno);
  318. if (tree)
  319. goto out;
  320. ocfs2_insert_refcount_tree(osb, new);
  321. tree = new;
  322. new = NULL;
  323. out:
  324. *ret_tree = tree;
  325. osb->osb_ref_tree_lru = tree;
  326. spin_unlock(&osb->osb_lock);
  327. if (new)
  328. ocfs2_free_refcount_tree(new);
  329. brelse(ref_root_bh);
  330. return ret;
  331. }
  332. static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
  333. {
  334. int ret;
  335. struct buffer_head *di_bh = NULL;
  336. struct ocfs2_dinode *di;
  337. ret = ocfs2_read_inode_block(inode, &di_bh);
  338. if (ret) {
  339. mlog_errno(ret);
  340. goto out;
  341. }
  342. BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  343. di = (struct ocfs2_dinode *)di_bh->b_data;
  344. *ref_blkno = le64_to_cpu(di->i_refcount_loc);
  345. brelse(di_bh);
  346. out:
  347. return ret;
  348. }
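/*
 * Take the cluster lock on the refcount tree, then the local
 * rw_semaphore (write for rw != 0, read otherwise).
 */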
  349. static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
  350. struct ocfs2_refcount_tree *tree, int rw)
  351. {
  352. int ret;
  353. ret = ocfs2_refcount_lock(tree, rw);
  354. if (ret) {
  355. mlog_errno(ret);
  356. goto out;
  357. }
  358. if (rw)
  359. down_write(&tree->rf_sem);
  360. else
  361. down_read(&tree->rf_sem);
  362. out:
  363. return ret;
  364. }
  365. /*
  366. * Lock the refcount tree pointed to by ref_blkno and return the tree.
  367. * In most cases, callers that lock the tree also read the refcount block,
  368. * so read it here and hand it back if the caller really needs it.
  369. *
  370. * If the tree has been re-created by another node, free the
  371. * old one and re-create it.
  372. */
  373. int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
  374. u64 ref_blkno, int rw,
  375. struct ocfs2_refcount_tree **ret_tree,
  376. struct buffer_head **ref_bh)
  377. {
  378. int ret, delete_tree = 0;
  379. struct ocfs2_refcount_tree *tree = NULL;
  380. struct buffer_head *ref_root_bh = NULL;
  381. struct ocfs2_refcount_block *rb;
  382. again:
  383. ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
  384. if (ret) {
  385. mlog_errno(ret);
  386. return ret;
  387. }
  388. ocfs2_refcount_tree_get(tree);
  389. ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
  390. if (ret) {
  391. mlog_errno(ret);
  392. ocfs2_refcount_tree_put(tree);
  393. goto out;
  394. }
  395. ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
  396. &ref_root_bh);
  397. if (ret) {
  398. mlog_errno(ret);
  399. ocfs2_unlock_refcount_tree(osb, tree, rw);
  400. ocfs2_refcount_tree_put(tree);
  401. goto out;
  402. }
  403. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  404. /*
  405. * If the refcount block has been freed and re-created, we may need
  406. * to recreate the refcount tree also.
  407. *
  408. * Here we just remove the tree from the rb-tree, and the last
  409. * kref holder will unlock and delete this refcount_tree.
  410. * Then we goto "again" and ocfs2_get_refcount_tree will create
  411. * the new refcount tree for us.
  412. */
  413. if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
  414. if (!tree->rf_removed) {
  415. ocfs2_erase_refcount_tree_from_list(osb, tree);
  416. tree->rf_removed = 1;
  417. delete_tree = 1;
  418. }
  419. ocfs2_unlock_refcount_tree(osb, tree, rw);
  420. /*
  421. * We get an extra reference when we create the refcount
  422. * tree, so another put will destroy it.
  423. */
  424. if (delete_tree)
  425. ocfs2_refcount_tree_put(tree);
  426. brelse(ref_root_bh);
  427. ref_root_bh = NULL;
  428. goto again;
  429. }
  430. *ret_tree = tree;
  431. if (ref_bh) {
  432. *ref_bh = ref_root_bh;
  433. ref_root_bh = NULL;
  434. }
  435. out:
  436. brelse(ref_root_bh);
  437. return ret;
  438. }
  439. int ocfs2_lock_refcount_tree_by_inode(struct inode *inode, int rw,
  440. struct ocfs2_refcount_tree **ret_tree,
  441. struct buffer_head **ref_bh)
  442. {
  443. int ret;
  444. u64 ref_blkno;
  445. ret = ocfs2_get_refcount_block(inode, &ref_blkno);
  446. if (ret) {
  447. mlog_errno(ret);
  448. return ret;
  449. }
  450. return ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno,
  451. rw, ret_tree, ref_bh);
  452. }
  453. void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
  454. struct ocfs2_refcount_tree *tree, int rw)
  455. {
  456. if (rw)
  457. up_write(&tree->rf_sem);
  458. else
  459. up_read(&tree->rf_sem);
  460. ocfs2_refcount_unlock(tree, rw);
  461. ocfs2_refcount_tree_put(tree);
  462. }
  463. void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
  464. {
  465. struct rb_node *node;
  466. struct ocfs2_refcount_tree *tree;
  467. struct rb_root *root = &osb->osb_rf_lock_tree;
  468. while ((node = rb_last(root)) != NULL) {
  469. tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);
  470. mlog(0, "Purge tree %llu\n",
  471. (unsigned long long) tree->rf_blkno);
  472. rb_erase(&tree->rf_node, root);
  473. ocfs2_free_refcount_tree(tree);
  474. }
  475. }
  476. /*
  477. * Create a refcount tree for an inode.
  478. * The caller is assumed to already hold the inode lock.
  479. */
  480. static int ocfs2_create_refcount_tree(struct inode *inode,
  481. struct buffer_head *di_bh)
  482. {
  483. int ret;
  484. handle_t *handle = NULL;
  485. struct ocfs2_alloc_context *meta_ac = NULL;
  486. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  487. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  488. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  489. struct buffer_head *new_bh = NULL;
  490. struct ocfs2_refcount_block *rb;
  491. struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
  492. u16 suballoc_bit_start;
  493. u32 num_got;
  494. u64 first_blkno;
  495. BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
  496. mlog(0, "create tree for inode %lu\n", inode->i_ino);
  497. ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
  498. if (ret) {
  499. mlog_errno(ret);
  500. goto out;
  501. }
  502. handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
  503. if (IS_ERR(handle)) {
  504. ret = PTR_ERR(handle);
  505. mlog_errno(ret);
  506. goto out;
  507. }
  508. ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
  509. OCFS2_JOURNAL_ACCESS_WRITE);
  510. if (ret) {
  511. mlog_errno(ret);
  512. goto out_commit;
  513. }
  514. ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1,
  515. &suballoc_bit_start, &num_got,
  516. &first_blkno);
  517. if (ret) {
  518. mlog_errno(ret);
  519. goto out_commit;
  520. }
  521. new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
  522. if (!new_tree) {
  523. ret = -ENOMEM;
  524. mlog_errno(ret);
  525. goto out_commit;
  526. }
  527. new_bh = sb_getblk(inode->i_sb, first_blkno);
  if (new_bh == NULL) {
  ret = -EIO;
  mlog_errno(ret);
  goto out_commit;
  }
  528. ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);
  529. ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
  530. OCFS2_JOURNAL_ACCESS_CREATE);
  531. if (ret) {
  532. mlog_errno(ret);
  533. goto out_commit;
  534. }
  535. /* Initialize ocfs2_refcount_block. */
  536. rb = (struct ocfs2_refcount_block *)new_bh->b_data;
  537. memset(rb, 0, inode->i_sb->s_blocksize);
  538. strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
  539. rb->rf_suballoc_slot = cpu_to_le16(osb->slot_num);
  540. rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
  541. rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
  542. rb->rf_blkno = cpu_to_le64(first_blkno);
  543. rb->rf_count = cpu_to_le32(1);
  544. rb->rf_records.rl_count =
  545. cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
  546. spin_lock(&osb->osb_lock);
  547. rb->rf_generation = osb->s_next_generation++;
  548. spin_unlock(&osb->osb_lock);
  549. ocfs2_journal_dirty(handle, new_bh);
  550. spin_lock(&oi->ip_lock);
  551. oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
  552. di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
  553. di->i_refcount_loc = cpu_to_le64(first_blkno);
  554. spin_unlock(&oi->ip_lock);
  555. mlog(0, "created tree for inode %lu, refblock %llu\n",
  556. inode->i_ino, (unsigned long long)first_blkno);
  557. ocfs2_journal_dirty(handle, di_bh);
  558. /*
  559. * We have to init the tree lock here, since creating it
  560. * requires the generation number.
  561. */
  562. new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
  563. ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
  564. new_tree->rf_generation);
  565. spin_lock(&osb->osb_lock);
  566. tree = ocfs2_find_refcount_tree(osb, first_blkno);
  567. /*
  568. * We've just created a new refcount tree in this block. If
  569. * we found a refcount tree on the ocfs2_super, it must be
  570. * one we just deleted. We free the old tree before
  571. * inserting the new tree.
  572. */
  573. BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
  574. if (tree)
  575. ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
  576. ocfs2_insert_refcount_tree(osb, new_tree);
  577. spin_unlock(&osb->osb_lock);
  578. new_tree = NULL;
  579. if (tree)
  580. ocfs2_refcount_tree_put(tree);
  581. out_commit:
  582. ocfs2_commit_trans(osb, handle);
  583. out:
  584. if (new_tree) {
  585. ocfs2_metadata_cache_exit(&new_tree->rf_ci);
  586. kfree(new_tree);
  587. }
  588. brelse(new_bh);
  589. if (meta_ac)
  590. ocfs2_free_alloc_context(meta_ac);
  591. return ret;
  592. }
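/*
 * Point an inode at an already existing refcount tree rooted at
 * refcount_loc and take an additional reference on it by bumping
 * rf_count.
 */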
  593. static int ocfs2_set_refcount_tree(struct inode *inode,
  594. struct buffer_head *di_bh,
  595. u64 refcount_loc)
  596. {
  597. int ret;
  598. handle_t *handle = NULL;
  599. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  600. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  601. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  602. struct buffer_head *ref_root_bh = NULL;
  603. struct ocfs2_refcount_block *rb;
  604. struct ocfs2_refcount_tree *ref_tree;
  605. BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
  606. ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
  607. &ref_tree, &ref_root_bh);
  608. if (ret) {
  609. mlog_errno(ret);
  610. return ret;
  611. }
  612. handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
  613. if (IS_ERR(handle)) {
  614. ret = PTR_ERR(handle);
  615. mlog_errno(ret);
  616. goto out;
  617. }
  618. ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
  619. OCFS2_JOURNAL_ACCESS_WRITE);
  620. if (ret) {
  621. mlog_errno(ret);
  622. goto out_commit;
  623. }
  624. ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
  625. OCFS2_JOURNAL_ACCESS_WRITE);
  626. if (ret) {
  627. mlog_errno(ret);
  628. goto out_commit;
  629. }
  630. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  631. le32_add_cpu(&rb->rf_count, 1);
  632. ocfs2_journal_dirty(handle, ref_root_bh);
  633. spin_lock(&oi->ip_lock);
  634. oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
  635. di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
  636. di->i_refcount_loc = cpu_to_le64(refcount_loc);
  637. spin_unlock(&oi->ip_lock);
  638. ocfs2_journal_dirty(handle, di_bh);
  639. out_commit:
  640. ocfs2_commit_trans(osb, handle);
  641. out:
  642. ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
  643. brelse(ref_root_bh);
  644. return ret;
  645. }
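/*
 * Drop this inode's reference on its refcount tree.  If we were the
 * last user, free the refcount block back to its suballocator and
 * tear down the in-memory tree.
 */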
  646. int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
  647. {
  648. int ret, delete_tree = 0;
  649. handle_t *handle = NULL;
  650. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  651. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  652. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  653. struct ocfs2_refcount_block *rb;
  654. struct inode *alloc_inode = NULL;
  655. struct buffer_head *alloc_bh = NULL;
  656. struct buffer_head *blk_bh = NULL;
  657. struct ocfs2_refcount_tree *ref_tree;
  658. int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
  659. u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
  660. u16 bit = 0;
  661. if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL))
  662. return 0;
  663. BUG_ON(!ref_blkno);
  664. ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
  665. if (ret) {
  666. mlog_errno(ret);
  667. return ret;
  668. }
  669. rb = (struct ocfs2_refcount_block *)blk_bh->b_data;
  670. /*
  671. * If we are the last user, we need to free the block.
  672. * So lock the allocator ahead of time.
  673. */
  674. if (le32_to_cpu(rb->rf_count) == 1) {
  675. blk = le64_to_cpu(rb->rf_blkno);
  676. bit = le16_to_cpu(rb->rf_suballoc_bit);
  677. bg_blkno = ocfs2_which_suballoc_group(blk, bit);
  678. alloc_inode = ocfs2_get_system_file_inode(osb,
  679. EXTENT_ALLOC_SYSTEM_INODE,
  680. le16_to_cpu(rb->rf_suballoc_slot));
  681. if (!alloc_inode) {
  682. ret = -ENOMEM;
  683. mlog_errno(ret);
  684. goto out;
  685. }
  686. mutex_lock(&alloc_inode->i_mutex);
  687. ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
  688. if (ret) {
  689. mlog_errno(ret);
  690. goto out_mutex;
  691. }
  692. credits += OCFS2_SUBALLOC_FREE;
  693. }
  694. handle = ocfs2_start_trans(osb, credits);
  695. if (IS_ERR(handle)) {
  696. ret = PTR_ERR(handle);
  697. mlog_errno(ret);
  698. goto out_unlock;
  699. }
  700. ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
  701. OCFS2_JOURNAL_ACCESS_WRITE);
  702. if (ret) {
  703. mlog_errno(ret);
  704. goto out_commit;
  705. }
  706. ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
  707. OCFS2_JOURNAL_ACCESS_WRITE);
  708. if (ret) {
  709. mlog_errno(ret);
  710. goto out_commit;
  711. }
  712. spin_lock(&oi->ip_lock);
  713. oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
  714. di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
  715. di->i_refcount_loc = 0;
  716. spin_unlock(&oi->ip_lock);
  717. ocfs2_journal_dirty(handle, di_bh);
  718. le32_add_cpu(&rb->rf_count , -1);
  719. ocfs2_journal_dirty(handle, blk_bh);
  720. if (!rb->rf_count) {
  721. delete_tree = 1;
  722. ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
  723. ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
  724. alloc_bh, bit, bg_blkno, 1);
  725. if (ret)
  726. mlog_errno(ret);
  727. }
  728. out_commit:
  729. ocfs2_commit_trans(osb, handle);
  730. out_unlock:
  731. if (alloc_inode) {
  732. ocfs2_inode_unlock(alloc_inode, 1);
  733. brelse(alloc_bh);
  734. }
  735. out_mutex:
  736. if (alloc_inode) {
  737. mutex_unlock(&alloc_inode->i_mutex);
  738. iput(alloc_inode);
  739. }
  740. out:
  741. ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
  742. if (delete_tree)
  743. ocfs2_refcount_tree_put(ref_tree);
  744. brelse(blk_bh);
  745. return ret;
  746. }
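/*
 * Scan the record list of a leaf refcount block for the record that
 * covers cpos.  If cpos falls in a hole, fake a record with
 * r_refcount = 0 spanning the hole (clamped to len).  *index returns
 * the position of the record we stopped at.
 */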
  747. static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
  748. struct buffer_head *ref_leaf_bh,
  749. u64 cpos, unsigned int len,
  750. struct ocfs2_refcount_rec *ret_rec,
  751. int *index)
  752. {
  753. int i = 0;
  754. struct ocfs2_refcount_block *rb =
  755. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  756. struct ocfs2_refcount_rec *rec = NULL;
  757. for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
  758. rec = &rb->rf_records.rl_recs[i];
  759. if (le64_to_cpu(rec->r_cpos) +
  760. le32_to_cpu(rec->r_clusters) <= cpos)
  761. continue;
  762. else if (le64_to_cpu(rec->r_cpos) > cpos)
  763. break;
  764. /* ok, cpos falls in this rec. Just return. */
  765. if (ret_rec)
  766. *ret_rec = *rec;
  767. goto out;
  768. }
  769. if (ret_rec) {
  770. /* We hit a hole here, so fake the rec. */
  771. ret_rec->r_cpos = cpu_to_le64(cpos);
  772. ret_rec->r_refcount = 0;
  773. if (i < le16_to_cpu(rb->rf_records.rl_used) &&
  774. le64_to_cpu(rec->r_cpos) < cpos + len)
  775. ret_rec->r_clusters =
  776. cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
  777. else
  778. ret_rec->r_clusters = cpu_to_le32(len);
  779. }
  780. out:
  781. *index = i;
  782. }
  783. /*
  784. * Try to remove refcount tree. The mechanism is:
  785. * 1) Check whether i_clusters == 0; if not, exit.
  786. * 2) Check whether we have i_xattr_loc in the dinode; if so, exit.
  787. * 3) Check whether we have inline xattr values stored outside; if so, exit.
  788. * 4) Remove the tree.
  789. */
  790. int ocfs2_try_remove_refcount_tree(struct inode *inode,
  791. struct buffer_head *di_bh)
  792. {
  793. int ret;
  794. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  795. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  796. down_write(&oi->ip_xattr_sem);
  797. down_write(&oi->ip_alloc_sem);
  798. if (oi->ip_clusters)
  799. goto out;
  800. if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc)
  801. goto out;
  802. if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL &&
  803. ocfs2_has_inline_xattr_value_outside(inode, di))
  804. goto out;
  805. ret = ocfs2_remove_refcount_tree(inode, di_bh);
  806. if (ret)
  807. mlog_errno(ret);
  808. out:
  809. up_write(&oi->ip_alloc_sem);
  810. up_write(&oi->ip_xattr_sem);
  811. return 0;
  812. }
  813. /*
  814. * Given a cpos and len, try to find the refcount record which contains cpos.
  815. * 1. If cpos can be found in one refcount record, return the record.
  816. * 2. If cpos can't be found, return a fake record which starts at cpos
  817. * and ends at the smaller of cpos + len and the start of the next record.
  818. * This fake record has r_refcount = 0.
  819. */
  820. static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
  821. struct buffer_head *ref_root_bh,
  822. u64 cpos, unsigned int len,
  823. struct ocfs2_refcount_rec *ret_rec,
  824. int *index,
  825. struct buffer_head **ret_bh)
  826. {
  827. int ret = 0, i, found;
  828. u32 low_cpos;
  829. struct ocfs2_extent_list *el;
  830. struct ocfs2_extent_rec *tmp, *rec = NULL;
  831. struct ocfs2_extent_block *eb;
  832. struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
  833. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  834. struct ocfs2_refcount_block *rb =
  835. (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  836. if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
  837. ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
  838. ret_rec, index);
  839. *ret_bh = ref_root_bh;
  840. get_bh(ref_root_bh);
  841. return 0;
  842. }
  843. el = &rb->rf_list;
  844. low_cpos = cpos & OCFS2_32BIT_POS_MASK;
  845. if (el->l_tree_depth) {
  846. ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
  847. if (ret) {
  848. mlog_errno(ret);
  849. goto out;
  850. }
  851. eb = (struct ocfs2_extent_block *) eb_bh->b_data;
  852. el = &eb->h_list;
  853. if (el->l_tree_depth) {
  854. ocfs2_error(sb,
  855. "refcount tree %llu has non zero tree "
  856. "depth in leaf btree tree block %llu\n",
  857. (unsigned long long)ocfs2_metadata_cache_owner(ci),
  858. (unsigned long long)eb_bh->b_blocknr);
  859. ret = -EROFS;
  860. goto out;
  861. }
  862. }
  863. found = 0;
  864. for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
  865. rec = &el->l_recs[i];
  866. if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
  867. found = 1;
  868. break;
  869. }
  870. }
  871. /* adjust len when there is another ocfs2_extent_rec after it. */
  872. if (found && i < le16_to_cpu(el->l_next_free_rec) - 1) {
  873. tmp = &el->l_recs[i+1];
  874. if (le32_to_cpu(tmp->e_cpos) < cpos + len)
  875. len = le32_to_cpu(tmp->e_cpos) - cpos;
  876. }
  877. ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
  878. &ref_leaf_bh);
  879. if (ret) {
  880. mlog_errno(ret);
  881. goto out;
  882. }
  883. ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
  884. ret_rec, index);
  885. *ret_bh = ref_leaf_bh;
  886. out:
  887. brelse(eb_bh);
  888. return ret;
  889. }
  890. enum ocfs2_ref_rec_contig {
  891. REF_CONTIG_NONE = 0,
  892. REF_CONTIG_LEFT,
  893. REF_CONTIG_RIGHT,
  894. REF_CONTIG_LEFTRIGHT,
  895. };
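/*
 * Return REF_CONTIG_RIGHT if the records at index and index + 1 have
 * the same refcount and are physically contiguous, i.e. they could be
 * merged into one record.
 */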
  896. static enum ocfs2_ref_rec_contig
  897. ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
  898. int index)
  899. {
  900. if ((rb->rf_records.rl_recs[index].r_refcount ==
  901. rb->rf_records.rl_recs[index + 1].r_refcount) &&
  902. (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
  903. le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
  904. le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
  905. return REF_CONTIG_RIGHT;
  906. return REF_CONTIG_NONE;
  907. }
  908. static enum ocfs2_ref_rec_contig
  909. ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
  910. int index)
  911. {
  912. enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;
  913. if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
  914. ret = ocfs2_refcount_rec_adjacent(rb, index);
  915. if (index > 0) {
  916. enum ocfs2_ref_rec_contig tmp;
  917. tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);
  918. if (tmp == REF_CONTIG_RIGHT) {
  919. if (ret == REF_CONTIG_RIGHT)
  920. ret = REF_CONTIG_LEFTRIGHT;
  921. else
  922. ret = REF_CONTIG_LEFT;
  923. }
  924. }
  925. return ret;
  926. }
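/*
 * Fold record index + 1 into record index (both must carry the same
 * refcount), then shift the remaining records left and shrink rl_used.
 */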
  927. static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
  928. int index)
  929. {
  930. BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
  931. rb->rf_records.rl_recs[index+1].r_refcount);
  932. le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
  933. le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));
  934. if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
  935. memmove(&rb->rf_records.rl_recs[index + 1],
  936. &rb->rf_records.rl_recs[index + 2],
  937. sizeof(struct ocfs2_refcount_rec) *
  938. (le16_to_cpu(rb->rf_records.rl_used) - index - 2));
  939. memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
  940. 0, sizeof(struct ocfs2_refcount_rec));
  941. le16_add_cpu(&rb->rf_records.rl_used, -1);
  942. }
  943. /*
  944. * Merge the refcount rec if we are contiguous with the adjacent recs.
  945. */
  946. static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
  947. int index)
  948. {
  949. enum ocfs2_ref_rec_contig contig =
  950. ocfs2_refcount_rec_contig(rb, index);
  951. if (contig == REF_CONTIG_NONE)
  952. return;
  953. if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
  954. BUG_ON(index == 0);
  955. index--;
  956. }
  957. ocfs2_rotate_refcount_rec_left(rb, index);
  958. if (contig == REF_CONTIG_LEFTRIGHT)
  959. ocfs2_rotate_refcount_rec_left(rb, index);
  960. }
  961. /*
  962. * Change the refcount indexed by "index" in ref_bh.
  963. * If refcount reaches 0, remove it.
  964. */
  965. static int ocfs2_change_refcount_rec(handle_t *handle,
  966. struct ocfs2_caching_info *ci,
  967. struct buffer_head *ref_leaf_bh,
  968. int index, int merge, int change)
  969. {
  970. int ret;
  971. struct ocfs2_refcount_block *rb =
  972. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  973. struct ocfs2_refcount_list *rl = &rb->rf_records;
  974. struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];
  975. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  976. OCFS2_JOURNAL_ACCESS_WRITE);
  977. if (ret) {
  978. mlog_errno(ret);
  979. goto out;
  980. }
  981. mlog(0, "change index %d, old count %u, change %d\n", index,
  982. le32_to_cpu(rec->r_refcount), change);
  983. le32_add_cpu(&rec->r_refcount, change);
  984. if (!rec->r_refcount) {
  985. if (index != le16_to_cpu(rl->rl_used) - 1) {
  986. memmove(rec, rec + 1,
  987. (le16_to_cpu(rl->rl_used) - index - 1) *
  988. sizeof(struct ocfs2_refcount_rec));
  989. memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
  990. 0, sizeof(struct ocfs2_refcount_rec));
  991. }
  992. le16_add_cpu(&rl->rl_used, -1);
  993. } else if (merge)
  994. ocfs2_refcount_rec_merge(rb, index);
  995. ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
  996. if (ret)
  997. mlog_errno(ret);
  998. out:
  999. return ret;
  1000. }
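/*
 * The root refcount block stores records inline until it fills up.
 * Expand it by allocating a new leaf block, copying the root's
 * contents into it, and turning the root into an extent list with the
 * new block as its only leaf.
 */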
  1001. static int ocfs2_expand_inline_ref_root(handle_t *handle,
  1002. struct ocfs2_caching_info *ci,
  1003. struct buffer_head *ref_root_bh,
  1004. struct buffer_head **ref_leaf_bh,
  1005. struct ocfs2_alloc_context *meta_ac)
  1006. {
  1007. int ret;
  1008. u16 suballoc_bit_start;
  1009. u32 num_got;
  1010. u64 blkno;
  1011. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1012. struct buffer_head *new_bh = NULL;
  1013. struct ocfs2_refcount_block *new_rb;
  1014. struct ocfs2_refcount_block *root_rb =
  1015. (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1016. ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
  1017. OCFS2_JOURNAL_ACCESS_WRITE);
  1018. if (ret) {
  1019. mlog_errno(ret);
  1020. goto out;
  1021. }
  1022. ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1,
  1023. &suballoc_bit_start, &num_got,
  1024. &blkno);
  1025. if (ret) {
  1026. mlog_errno(ret);
  1027. goto out;
  1028. }
  1029. new_bh = sb_getblk(sb, blkno);
  1030. if (new_bh == NULL) {
  1031. ret = -EIO;
  1032. mlog_errno(ret);
  1033. goto out;
  1034. }
  1035. ocfs2_set_new_buffer_uptodate(ci, new_bh);
  1036. ret = ocfs2_journal_access_rb(handle, ci, new_bh,
  1037. OCFS2_JOURNAL_ACCESS_CREATE);
  1038. if (ret) {
  1039. mlog_errno(ret);
  1040. goto out;
  1041. }
  1042. /*
  1043. * Initialize ocfs2_refcount_block.
  1044. * It should contain the same information as the old root,
  1045. * so just memcpy it and change the corresponding fields.
  1046. */
  1047. memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);
  1048. new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
  1049. new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
  1050. new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
  1051. new_rb->rf_blkno = cpu_to_le64(blkno);
  1052. new_rb->rf_cpos = cpu_to_le32(0);
  1053. new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
  1054. new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
  1055. ocfs2_journal_dirty(handle, new_bh);
  1056. /* Now change the root. */
  1057. memset(&root_rb->rf_list, 0, sb->s_blocksize -
  1058. offsetof(struct ocfs2_refcount_block, rf_list));
  1059. root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
  1060. root_rb->rf_clusters = cpu_to_le32(1);
  1061. root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
  1062. root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
  1063. root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
  1064. root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);
  1065. ocfs2_journal_dirty(handle, ref_root_bh);
  1066. mlog(0, "new leaf block %llu, used %u\n", (unsigned long long)blkno,
  1067. le16_to_cpu(new_rb->rf_records.rl_used));
  1068. *ref_leaf_bh = new_bh;
  1069. new_bh = NULL;
  1070. out:
  1071. brelse(new_bh);
  1072. return ret;
  1073. }
  1074. static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
  1075. struct ocfs2_refcount_rec *next)
  1076. {
  1077. if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
  1078. ocfs2_get_ref_rec_low_cpos(next))
  1079. return 1;
  1080. return 0;
  1081. }
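/* Comparators and swap helper handed to sort() when reordering refcount records. */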
  1082. static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
  1083. {
  1084. const struct ocfs2_refcount_rec *l = a, *r = b;
  1085. u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
  1086. u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);
  1087. if (l_cpos > r_cpos)
  1088. return 1;
  1089. if (l_cpos < r_cpos)
  1090. return -1;
  1091. return 0;
  1092. }
  1093. static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
  1094. {
  1095. const struct ocfs2_refcount_rec *l = a, *r = b;
  1096. u64 l_cpos = le64_to_cpu(l->r_cpos);
  1097. u64 r_cpos = le64_to_cpu(r->r_cpos);
  1098. if (l_cpos > r_cpos)
  1099. return 1;
  1100. if (l_cpos < r_cpos)
  1101. return -1;
  1102. return 0;
  1103. }
  1104. static void swap_refcount_rec(void *a, void *b, int size)
  1105. {
  1106. struct ocfs2_refcount_rec *l = a, *r = b, tmp;
  1107. tmp = *(struct ocfs2_refcount_rec *)l;
  1108. *(struct ocfs2_refcount_rec *)l =
  1109. *(struct ocfs2_refcount_rec *)r;
  1110. *(struct ocfs2_refcount_rec *)r = tmp;
  1111. }
  1112. /*
  1113. * The refcount recs are ordered by their 64 bit cpos,
  1114. * but we will use the low 32 bits as the e_cpos in the b-tree,
  1115. * so we need to make sure the split pos doesn't intersect any record.
  1116. *
  1117. * Note: the refcount recs are already sorted by their low 32 bit cpos,
  1118. * so just try the middle pos first; we exit as soon as we find
  1119. * a good position.
  1120. */
  1121. static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
  1122. u32 *split_pos, int *split_index)
  1123. {
  1124. int num_used = le16_to_cpu(rl->rl_used);
  1125. int delta, middle = num_used / 2;
  1126. for (delta = 0; delta < middle; delta++) {
  1127. /* Let's check delta earlier than middle */
  1128. if (ocfs2_refcount_rec_no_intersect(
  1129. &rl->rl_recs[middle - delta - 1],
  1130. &rl->rl_recs[middle - delta])) {
  1131. *split_index = middle - delta;
  1132. break;
  1133. }
  1134. /* For even counts, don't walk off the end */
  1135. if ((middle + delta + 1) == num_used)
  1136. continue;
  1137. /* Now try delta past middle */
  1138. if (ocfs2_refcount_rec_no_intersect(
  1139. &rl->rl_recs[middle + delta],
  1140. &rl->rl_recs[middle + delta + 1])) {
  1141. *split_index = middle + delta + 1;
  1142. break;
  1143. }
  1144. }
  1145. if (delta >= middle)
  1146. return -ENOSPC;
  1147. *split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
  1148. return 0;
  1149. }
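/*
 * Split a full leaf refcount block: sort its records by low 32-bit
 * cpos, pick a split point that doesn't cut a record in half, and move
 * everything from split_index onward into new_bh.  *split_cpos returns
 * the cpos at which the new leaf starts.
 */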
  1150. static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
  1151. struct buffer_head *new_bh,
  1152. u32 *split_cpos)
  1153. {
  1154. int split_index = 0, num_moved, ret;
  1155. u32 cpos = 0;
  1156. struct ocfs2_refcount_block *rb =
  1157. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1158. struct ocfs2_refcount_list *rl = &rb->rf_records;
  1159. struct ocfs2_refcount_block *new_rb =
  1160. (struct ocfs2_refcount_block *)new_bh->b_data;
  1161. struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;
  1162. mlog(0, "split old leaf refcount block %llu, count = %u, used = %u\n",
  1163. (unsigned long long)ref_leaf_bh->b_blocknr,
  1164. le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
  1165. /*
  1166. * XXX: Improvement later.
  1167. * If we know all the high 32 bit cpos are the same, there is no need to sort.
  1168. *
  1169. * In order to make the whole process safe, we do:
  1170. * 1. sort the entries by their low 32 bit cpos first so that we can
  1171. * find the split cpos easily.
  1172. * 2. call ocfs2_insert_extent to insert the new refcount block.
  1173. * 3. move the refcount rec to the new block.
  1174. * 4. sort the entries by their 64 bit cpos.
  1175. * 5. dirty the new_rb and rb.
  1176. */
  1177. sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
  1178. sizeof(struct ocfs2_refcount_rec),
  1179. cmp_refcount_rec_by_low_cpos, swap_refcount_rec);
  1180. ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
  1181. if (ret) {
  1182. mlog_errno(ret);
  1183. return ret;
  1184. }
  1185. new_rb->rf_cpos = cpu_to_le32(cpos);
  1186. /* move refcount records starting from split_index to the new block. */
  1187. num_moved = le16_to_cpu(rl->rl_used) - split_index;
  1188. memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
  1189. num_moved * sizeof(struct ocfs2_refcount_rec));
1190. /* OK, remove the entries we just moved over to the other block. */
  1191. memset(&rl->rl_recs[split_index], 0,
  1192. num_moved * sizeof(struct ocfs2_refcount_rec));
  1193. /* change old and new rl_used accordingly. */
  1194. le16_add_cpu(&rl->rl_used, -num_moved);
1195. new_rl->rl_used = cpu_to_le16(num_moved);
  1196. sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
  1197. sizeof(struct ocfs2_refcount_rec),
  1198. cmp_refcount_rec_by_cpos, swap_refcount_rec);
  1199. sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
  1200. sizeof(struct ocfs2_refcount_rec),
  1201. cmp_refcount_rec_by_cpos, swap_refcount_rec);
  1202. *split_cpos = cpos;
  1203. return 0;
  1204. }
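/*
 * Allocate and initialize a new leaf refcount block, move the records
 * above the split point from ref_leaf_bh into it, and insert the new
 * leaf into the refcount b-tree rooted at ref_root_bh.
 */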
  1205. static int ocfs2_new_leaf_refcount_block(handle_t *handle,
  1206. struct ocfs2_caching_info *ci,
  1207. struct buffer_head *ref_root_bh,
  1208. struct buffer_head *ref_leaf_bh,
  1209. struct ocfs2_alloc_context *meta_ac)
  1210. {
  1211. int ret;
  1212. u16 suballoc_bit_start;
  1213. u32 num_got, new_cpos;
  1214. u64 blkno;
  1215. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1216. struct ocfs2_refcount_block *root_rb =
  1217. (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1218. struct buffer_head *new_bh = NULL;
  1219. struct ocfs2_refcount_block *new_rb;
  1220. struct ocfs2_extent_tree ref_et;
  1221. BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));
  1222. ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
  1223. OCFS2_JOURNAL_ACCESS_WRITE);
  1224. if (ret) {
  1225. mlog_errno(ret);
  1226. goto out;
  1227. }
  1228. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1229. OCFS2_JOURNAL_ACCESS_WRITE);
  1230. if (ret) {
  1231. mlog_errno(ret);
  1232. goto out;
  1233. }
  1234. ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1,
  1235. &suballoc_bit_start, &num_got,
  1236. &blkno);
  1237. if (ret) {
  1238. mlog_errno(ret);
  1239. goto out;
  1240. }
  1241. new_bh = sb_getblk(sb, blkno);
  1242. if (new_bh == NULL) {
  1243. ret = -EIO;
  1244. mlog_errno(ret);
  1245. goto out;
  1246. }
  1247. ocfs2_set_new_buffer_uptodate(ci, new_bh);
  1248. ret = ocfs2_journal_access_rb(handle, ci, new_bh,
  1249. OCFS2_JOURNAL_ACCESS_CREATE);
  1250. if (ret) {
  1251. mlog_errno(ret);
  1252. goto out;
  1253. }
  1254. /* Initialize ocfs2_refcount_block. */
  1255. new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
  1256. memset(new_rb, 0, sb->s_blocksize);
  1257. strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
  1258. new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
  1259. new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
  1260. new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
  1261. new_rb->rf_blkno = cpu_to_le64(blkno);
  1262. new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
  1263. new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
  1264. new_rb->rf_records.rl_count =
  1265. cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
  1266. new_rb->rf_generation = root_rb->rf_generation;
  1267. ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
  1268. if (ret) {
  1269. mlog_errno(ret);
  1270. goto out;
  1271. }
  1272. ocfs2_journal_dirty(handle, ref_leaf_bh);
  1273. ocfs2_journal_dirty(handle, new_bh);
  1274. ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);
  1275. mlog(0, "insert new leaf block %llu at %u\n",
  1276. (unsigned long long)new_bh->b_blocknr, new_cpos);
  1277. /* Insert the new leaf block with the specific offset cpos. */
  1278. ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
  1279. 1, 0, meta_ac);
  1280. if (ret)
  1281. mlog_errno(ret);
  1282. out:
  1283. brelse(new_bh);
  1284. return ret;
  1285. }
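/*
 * Make room for more refcount records.  If the root block still stores
 * its records inline, expand it into a b-tree first, then add a new
 * leaf refcount block to the tree.
 */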
  1286. static int ocfs2_expand_refcount_tree(handle_t *handle,
  1287. struct ocfs2_caching_info *ci,
  1288. struct buffer_head *ref_root_bh,
  1289. struct buffer_head *ref_leaf_bh,
  1290. struct ocfs2_alloc_context *meta_ac)
  1291. {
  1292. int ret;
  1293. struct buffer_head *expand_bh = NULL;
  1294. if (ref_root_bh == ref_leaf_bh) {
  1295. /*
  1296. * the old root bh hasn't been expanded to a b-tree,
  1297. * so expand it first.
  1298. */
  1299. ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
  1300. &expand_bh, meta_ac);
  1301. if (ret) {
  1302. mlog_errno(ret);
  1303. goto out;
  1304. }
  1305. } else {
  1306. expand_bh = ref_leaf_bh;
  1307. get_bh(expand_bh);
  1308. }
1309. /* Now add a new refcount block into the tree. */
  1310. ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
  1311. expand_bh, meta_ac);
  1312. if (ret)
  1313. mlog_errno(ret);
  1314. out:
  1315. brelse(expand_bh);
  1316. return ret;
  1317. }
  1318. /*
  1319. * Adjust the extent rec in b-tree representing ref_leaf_bh.
  1320. *
  1321. * Only called when we have inserted a new refcount rec at index 0
  1322. * which means ocfs2_extent_rec.e_cpos may need some change.
  1323. */
  1324. static int ocfs2_adjust_refcount_rec(handle_t *handle,
  1325. struct ocfs2_caching_info *ci,
  1326. struct buffer_head *ref_root_bh,
  1327. struct buffer_head *ref_leaf_bh,
  1328. struct ocfs2_refcount_rec *rec)
  1329. {
  1330. int ret = 0, i;
  1331. u32 new_cpos, old_cpos;
  1332. struct ocfs2_path *path = NULL;
  1333. struct ocfs2_extent_tree et;
  1334. struct ocfs2_refcount_block *rb =
  1335. (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1336. struct ocfs2_extent_list *el;
  1337. if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
  1338. goto out;
  1339. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1340. old_cpos = le32_to_cpu(rb->rf_cpos);
  1341. new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
  1342. if (old_cpos <= new_cpos)
  1343. goto out;
  1344. ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
  1345. path = ocfs2_new_path_from_et(&et);
  1346. if (!path) {
  1347. ret = -ENOMEM;
  1348. mlog_errno(ret);
  1349. goto out;
  1350. }
  1351. ret = ocfs2_find_path(ci, path, old_cpos);
  1352. if (ret) {
  1353. mlog_errno(ret);
  1354. goto out;
  1355. }
  1356. /*
1357. * 2 more credits: one for the leaf refcount block, one for
1358. * the extent block that contains the extent rec.
  1359. */
  1360. ret = ocfs2_extend_trans(handle, handle->h_buffer_credits + 2);
  1361. if (ret < 0) {
  1362. mlog_errno(ret);
  1363. goto out;
  1364. }
  1365. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1366. OCFS2_JOURNAL_ACCESS_WRITE);
  1367. if (ret < 0) {
  1368. mlog_errno(ret);
  1369. goto out;
  1370. }
  1371. ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
  1372. OCFS2_JOURNAL_ACCESS_WRITE);
  1373. if (ret < 0) {
  1374. mlog_errno(ret);
  1375. goto out;
  1376. }
  1377. /* change the leaf extent block first. */
  1378. el = path_leaf_el(path);
  1379. for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
  1380. if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
  1381. break;
  1382. BUG_ON(i == le16_to_cpu(el->l_next_free_rec));
  1383. el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
  1384. /* change the r_cpos in the leaf block. */
  1385. rb->rf_cpos = cpu_to_le32(new_cpos);
  1386. ocfs2_journal_dirty(handle, path_leaf_bh(path));
  1387. ocfs2_journal_dirty(handle, ref_leaf_bh);
  1388. out:
  1389. ocfs2_free_path(path);
  1390. return ret;
  1391. }
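/*
 * Insert a new refcount rec at "index" in ref_leaf_bh.  If the leaf is
 * already full, expand the refcount tree first and re-locate the leaf
 * that should hold the rec.  A rec inserted at index 0 may also require
 * adjusting the e_cpos of the extent rec pointing at this leaf.
 */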
  1392. static int ocfs2_insert_refcount_rec(handle_t *handle,
  1393. struct ocfs2_caching_info *ci,
  1394. struct buffer_head *ref_root_bh,
  1395. struct buffer_head *ref_leaf_bh,
  1396. struct ocfs2_refcount_rec *rec,
  1397. int index, int merge,
  1398. struct ocfs2_alloc_context *meta_ac)
  1399. {
  1400. int ret;
  1401. struct ocfs2_refcount_block *rb =
  1402. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1403. struct ocfs2_refcount_list *rf_list = &rb->rf_records;
  1404. struct buffer_head *new_bh = NULL;
  1405. BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
  1406. if (rf_list->rl_used == rf_list->rl_count) {
  1407. u64 cpos = le64_to_cpu(rec->r_cpos);
  1408. u32 len = le32_to_cpu(rec->r_clusters);
  1409. ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
  1410. ref_leaf_bh, meta_ac);
  1411. if (ret) {
  1412. mlog_errno(ret);
  1413. goto out;
  1414. }
  1415. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1416. cpos, len, NULL, &index,
  1417. &new_bh);
  1418. if (ret) {
  1419. mlog_errno(ret);
  1420. goto out;
  1421. }
  1422. ref_leaf_bh = new_bh;
  1423. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1424. rf_list = &rb->rf_records;
  1425. }
  1426. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1427. OCFS2_JOURNAL_ACCESS_WRITE);
  1428. if (ret) {
  1429. mlog_errno(ret);
  1430. goto out;
  1431. }
  1432. if (index < le16_to_cpu(rf_list->rl_used))
  1433. memmove(&rf_list->rl_recs[index + 1],
  1434. &rf_list->rl_recs[index],
  1435. (le16_to_cpu(rf_list->rl_used) - index) *
  1436. sizeof(struct ocfs2_refcount_rec));
  1437. mlog(0, "insert refcount record start %llu, len %u, count %u "
  1438. "to leaf block %llu at index %d\n",
  1439. (unsigned long long)le64_to_cpu(rec->r_cpos),
  1440. le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount),
  1441. (unsigned long long)ref_leaf_bh->b_blocknr, index);
  1442. rf_list->rl_recs[index] = *rec;
  1443. le16_add_cpu(&rf_list->rl_used, 1);
  1444. if (merge)
  1445. ocfs2_refcount_rec_merge(rb, index);
  1446. ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
  1447. if (ret) {
  1448. mlog_errno(ret);
  1449. goto out;
  1450. }
  1451. if (index == 0) {
  1452. ret = ocfs2_adjust_refcount_rec(handle, ci,
  1453. ref_root_bh,
  1454. ref_leaf_bh, rec);
  1455. if (ret)
  1456. mlog_errno(ret);
  1457. }
  1458. out:
  1459. brelse(new_bh);
  1460. return ret;
  1461. }
  1462. /*
  1463. * Split the refcount_rec indexed by "index" in ref_leaf_bh.
1464. * This is much simpler than our b-tree code.
1465. * split_rec is the new refcount rec we want to insert.
1466. * If split_rec->r_refcount > 0, we are changing the refcount (in case we
1467. * increase a refcount or decrease a refcount to a non-zero value).
1468. * If split_rec->r_refcount == 0, we are punching a hole in the current
1469. * refcount rec (in case we decrease a refcount to zero).
  1470. */
  1471. static int ocfs2_split_refcount_rec(handle_t *handle,
  1472. struct ocfs2_caching_info *ci,
  1473. struct buffer_head *ref_root_bh,
  1474. struct buffer_head *ref_leaf_bh,
  1475. struct ocfs2_refcount_rec *split_rec,
  1476. int index, int merge,
  1477. struct ocfs2_alloc_context *meta_ac,
  1478. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1479. {
  1480. int ret, recs_need;
  1481. u32 len;
  1482. struct ocfs2_refcount_block *rb =
  1483. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1484. struct ocfs2_refcount_list *rf_list = &rb->rf_records;
  1485. struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
  1486. struct ocfs2_refcount_rec *tail_rec = NULL;
  1487. struct buffer_head *new_bh = NULL;
  1488. BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
  1489. mlog(0, "original r_pos %llu, cluster %u, split %llu, cluster %u\n",
  1490. le64_to_cpu(orig_rec->r_cpos), le32_to_cpu(orig_rec->r_clusters),
  1491. le64_to_cpu(split_rec->r_cpos),
  1492. le32_to_cpu(split_rec->r_clusters));
  1493. /*
  1494. * If we just need to split the header or tail clusters,
1495. * no more recs are needed, the split alone is OK.
1496. * Otherwise we need at least one new rec.
  1497. */
  1498. if (!split_rec->r_refcount &&
  1499. (split_rec->r_cpos == orig_rec->r_cpos ||
  1500. le64_to_cpu(split_rec->r_cpos) +
  1501. le32_to_cpu(split_rec->r_clusters) ==
  1502. le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
  1503. recs_need = 0;
  1504. else
  1505. recs_need = 1;
  1506. /*
1507. * We need one more rec if we split in the middle and the new rec has
  1508. * some refcount in it.
  1509. */
  1510. if (split_rec->r_refcount &&
  1511. (split_rec->r_cpos != orig_rec->r_cpos &&
  1512. le64_to_cpu(split_rec->r_cpos) +
  1513. le32_to_cpu(split_rec->r_clusters) !=
  1514. le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
  1515. recs_need++;
1516. /* If the leaf block doesn't have enough records, expand it. */
1517. if (le16_to_cpu(rf_list->rl_used) + recs_need > le16_to_cpu(rf_list->rl_count)) {
  1518. struct ocfs2_refcount_rec tmp_rec;
  1519. u64 cpos = le64_to_cpu(orig_rec->r_cpos);
  1520. len = le32_to_cpu(orig_rec->r_clusters);
  1521. ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
  1522. ref_leaf_bh, meta_ac);
  1523. if (ret) {
  1524. mlog_errno(ret);
  1525. goto out;
  1526. }
  1527. /*
1528. * We have to re-get it since the rec at cpos may have been moved to
  1529. * another leaf block.
  1530. */
  1531. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1532. cpos, len, &tmp_rec, &index,
  1533. &new_bh);
  1534. if (ret) {
  1535. mlog_errno(ret);
  1536. goto out;
  1537. }
  1538. ref_leaf_bh = new_bh;
  1539. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1540. rf_list = &rb->rf_records;
  1541. orig_rec = &rf_list->rl_recs[index];
  1542. }
  1543. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1544. OCFS2_JOURNAL_ACCESS_WRITE);
  1545. if (ret) {
  1546. mlog_errno(ret);
  1547. goto out;
  1548. }
  1549. /*
1550. * We have calculated how many new records we need and stored the
1551. * number in recs_need, so make enough room first by moving the records
  1552. * after "index" to the end.
  1553. */
  1554. if (index != le16_to_cpu(rf_list->rl_used) - 1)
  1555. memmove(&rf_list->rl_recs[index + 1 + recs_need],
  1556. &rf_list->rl_recs[index + 1],
  1557. (le16_to_cpu(rf_list->rl_used) - index - 1) *
  1558. sizeof(struct ocfs2_refcount_rec));
  1559. len = (le64_to_cpu(orig_rec->r_cpos) +
  1560. le32_to_cpu(orig_rec->r_clusters)) -
  1561. (le64_to_cpu(split_rec->r_cpos) +
  1562. le32_to_cpu(split_rec->r_clusters));
  1563. /*
1564. * If we have "len", then we will split in the tail and move it
1565. * to the end of the space we just spared.
  1566. */
  1567. if (len) {
  1568. tail_rec = &rf_list->rl_recs[index + recs_need];
  1569. memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
  1570. le64_add_cpu(&tail_rec->r_cpos,
  1571. le32_to_cpu(tail_rec->r_clusters) - len);
1572. tail_rec->r_clusters = cpu_to_le32(len);
  1573. }
  1574. /*
  1575. * If the split pos isn't the same as the original one, we need to
  1576. * split in the head.
  1577. *
1578. * Note: it can happen that split_rec.r_refcount == 0, recs_need == 0
1579. * and len > 0, which means we just cut the head from orig_rec. In that
1580. * case orig_rec was already turned into the tail above, so the r_cpos
1581. * check alone is not reliable and we also test tail_rec != orig_rec.
  1582. */
  1583. if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
  1584. len = le64_to_cpu(split_rec->r_cpos) -
  1585. le64_to_cpu(orig_rec->r_cpos);
  1586. orig_rec->r_clusters = cpu_to_le32(len);
  1587. index++;
  1588. }
  1589. le16_add_cpu(&rf_list->rl_used, recs_need);
  1590. if (split_rec->r_refcount) {
  1591. rf_list->rl_recs[index] = *split_rec;
  1592. mlog(0, "insert refcount record start %llu, len %u, count %u "
  1593. "to leaf block %llu at index %d\n",
  1594. (unsigned long long)le64_to_cpu(split_rec->r_cpos),
  1595. le32_to_cpu(split_rec->r_clusters),
  1596. le32_to_cpu(split_rec->r_refcount),
  1597. (unsigned long long)ref_leaf_bh->b_blocknr, index);
  1598. if (merge)
  1599. ocfs2_refcount_rec_merge(rb, index);
  1600. }
  1601. ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
  1602. if (ret)
  1603. mlog_errno(ret);
  1604. out:
  1605. brelse(new_bh);
  1606. return ret;
  1607. }
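/*
 * Increase the refcount of the clusters in [cpos, cpos + len).  Walk the
 * range record by record: bump an existing rec, insert a new rec with
 * r_refcount = 1 for a hole, or split a rec that we only partially cover.
 */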
  1608. static int __ocfs2_increase_refcount(handle_t *handle,
  1609. struct ocfs2_caching_info *ci,
  1610. struct buffer_head *ref_root_bh,
  1611. u64 cpos, u32 len, int merge,
  1612. struct ocfs2_alloc_context *meta_ac,
  1613. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1614. {
  1615. int ret = 0, index;
  1616. struct buffer_head *ref_leaf_bh = NULL;
  1617. struct ocfs2_refcount_rec rec;
  1618. unsigned int set_len = 0;
  1619. mlog(0, "Tree owner %llu, add refcount start %llu, len %u\n",
  1620. (unsigned long long)ocfs2_metadata_cache_owner(ci),
  1621. (unsigned long long)cpos, len);
  1622. while (len) {
  1623. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1624. cpos, len, &rec, &index,
  1625. &ref_leaf_bh);
  1626. if (ret) {
  1627. mlog_errno(ret);
  1628. goto out;
  1629. }
  1630. set_len = le32_to_cpu(rec.r_clusters);
  1631. /*
1632. * Here we may meet 3 situations:
1633. *
1634. * 1. If we find an already existing record that starts at cpos
1635. * and fits within the range, cool, we just need to increase
1636. * the r_refcount and we are done.
1637. * 2. If we find a hole, just insert a rec with r_refcount = 1.
  1638. * 3. If we are in the middle of one extent record, split
  1639. * it.
  1640. */
  1641. if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
  1642. set_len <= len) {
  1643. mlog(0, "increase refcount rec, start %llu, len %u, "
  1644. "count %u\n", (unsigned long long)cpos, set_len,
  1645. le32_to_cpu(rec.r_refcount));
  1646. ret = ocfs2_change_refcount_rec(handle, ci,
  1647. ref_leaf_bh, index,
  1648. merge, 1);
  1649. if (ret) {
  1650. mlog_errno(ret);
  1651. goto out;
  1652. }
  1653. } else if (!rec.r_refcount) {
  1654. rec.r_refcount = cpu_to_le32(1);
  1655. mlog(0, "insert refcount rec, start %llu, len %u\n",
  1656. (unsigned long long)le64_to_cpu(rec.r_cpos),
  1657. set_len);
  1658. ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
  1659. ref_leaf_bh,
  1660. &rec, index,
  1661. merge, meta_ac);
  1662. if (ret) {
  1663. mlog_errno(ret);
  1664. goto out;
  1665. }
  1666. } else {
  1667. set_len = min((u64)(cpos + len),
  1668. le64_to_cpu(rec.r_cpos) + set_len) - cpos;
  1669. rec.r_cpos = cpu_to_le64(cpos);
  1670. rec.r_clusters = cpu_to_le32(set_len);
  1671. le32_add_cpu(&rec.r_refcount, 1);
  1672. mlog(0, "split refcount rec, start %llu, "
  1673. "len %u, count %u\n",
  1674. (unsigned long long)le64_to_cpu(rec.r_cpos),
  1675. set_len, le32_to_cpu(rec.r_refcount));
  1676. ret = ocfs2_split_refcount_rec(handle, ci,
  1677. ref_root_bh, ref_leaf_bh,
  1678. &rec, index, merge,
  1679. meta_ac, dealloc);
  1680. if (ret) {
  1681. mlog_errno(ret);
  1682. goto out;
  1683. }
  1684. }
  1685. cpos += set_len;
  1686. len -= set_len;
  1687. brelse(ref_leaf_bh);
  1688. ref_leaf_bh = NULL;
  1689. }
  1690. out:
  1691. brelse(ref_leaf_bh);
  1692. return ret;
  1693. }
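/*
 * Remove an empty leaf refcount block: delete its extent record from the
 * refcount b-tree, queue the block itself for deallocation, and if the
 * tree ends up with no leaves at all, turn the root back into an inline
 * record block.
 */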
  1694. static int ocfs2_remove_refcount_extent(handle_t *handle,
  1695. struct ocfs2_caching_info *ci,
  1696. struct buffer_head *ref_root_bh,
  1697. struct buffer_head *ref_leaf_bh,
  1698. struct ocfs2_alloc_context *meta_ac,
  1699. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1700. {
  1701. int ret;
  1702. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1703. struct ocfs2_refcount_block *rb =
  1704. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1705. struct ocfs2_extent_tree et;
  1706. BUG_ON(rb->rf_records.rl_used);
  1707. ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
  1708. ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
  1709. 1, meta_ac, dealloc);
  1710. if (ret) {
  1711. mlog_errno(ret);
  1712. goto out;
  1713. }
  1714. ocfs2_remove_from_cache(ci, ref_leaf_bh);
  1715. /*
  1716. * add the freed block to the dealloc so that it will be freed
  1717. * when we run dealloc.
  1718. */
  1719. ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
  1720. le16_to_cpu(rb->rf_suballoc_slot),
  1721. le64_to_cpu(rb->rf_blkno),
  1722. le16_to_cpu(rb->rf_suballoc_bit));
  1723. if (ret) {
  1724. mlog_errno(ret);
  1725. goto out;
  1726. }
  1727. ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
  1728. OCFS2_JOURNAL_ACCESS_WRITE);
  1729. if (ret) {
  1730. mlog_errno(ret);
  1731. goto out;
  1732. }
  1733. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1734. le32_add_cpu(&rb->rf_clusters, -1);
  1735. /*
1736. * Check whether we need to restore the root refcount block if
1737. * there is no leaf extent block at all.
  1738. */
  1739. if (!rb->rf_list.l_next_free_rec) {
  1740. BUG_ON(rb->rf_clusters);
  1741. mlog(0, "reset refcount tree root %llu to be a record block.\n",
  1742. (unsigned long long)ref_root_bh->b_blocknr);
  1743. rb->rf_flags = 0;
  1744. rb->rf_parent = 0;
  1745. rb->rf_cpos = 0;
  1746. memset(&rb->rf_records, 0, sb->s_blocksize -
  1747. offsetof(struct ocfs2_refcount_block, rf_records));
  1748. rb->rf_records.rl_count =
  1749. cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
  1750. }
  1751. ocfs2_journal_dirty(handle, ref_root_bh);
  1752. out:
  1753. return ret;
  1754. }
  1755. int ocfs2_increase_refcount(handle_t *handle,
  1756. struct ocfs2_caching_info *ci,
  1757. struct buffer_head *ref_root_bh,
  1758. u64 cpos, u32 len,
  1759. struct ocfs2_alloc_context *meta_ac,
  1760. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1761. {
  1762. return __ocfs2_increase_refcount(handle, ci, ref_root_bh,
  1763. cpos, len, 1,
  1764. meta_ac, dealloc);
  1765. }
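/*
 * Decrease the refcount of [cpos, cpos + len), which must lie within the
 * rec at "index".  Split the rec if the range doesn't cover it exactly,
 * and remove the leaf block if it ends up with no records.
 */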
  1766. static int ocfs2_decrease_refcount_rec(handle_t *handle,
  1767. struct ocfs2_caching_info *ci,
  1768. struct buffer_head *ref_root_bh,
  1769. struct buffer_head *ref_leaf_bh,
  1770. int index, u64 cpos, unsigned int len,
  1771. struct ocfs2_alloc_context *meta_ac,
  1772. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1773. {
  1774. int ret;
  1775. struct ocfs2_refcount_block *rb =
  1776. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1777. struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];
  1778. BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
  1779. BUG_ON(cpos + len >
  1780. le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));
  1781. if (cpos == le64_to_cpu(rec->r_cpos) &&
  1782. len == le32_to_cpu(rec->r_clusters))
  1783. ret = ocfs2_change_refcount_rec(handle, ci,
  1784. ref_leaf_bh, index, 1, -1);
  1785. else {
  1786. struct ocfs2_refcount_rec split = *rec;
  1787. split.r_cpos = cpu_to_le64(cpos);
  1788. split.r_clusters = cpu_to_le32(len);
  1789. le32_add_cpu(&split.r_refcount, -1);
  1790. mlog(0, "split refcount rec, start %llu, "
  1791. "len %u, count %u, original start %llu, len %u\n",
  1792. (unsigned long long)le64_to_cpu(split.r_cpos),
  1793. len, le32_to_cpu(split.r_refcount),
  1794. (unsigned long long)le64_to_cpu(rec->r_cpos),
  1795. le32_to_cpu(rec->r_clusters));
  1796. ret = ocfs2_split_refcount_rec(handle, ci,
  1797. ref_root_bh, ref_leaf_bh,
  1798. &split, index, 1,
  1799. meta_ac, dealloc);
  1800. }
  1801. if (ret) {
  1802. mlog_errno(ret);
  1803. goto out;
  1804. }
  1805. /* Remove the leaf refcount block if it contains no refcount record. */
  1806. if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
  1807. ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh,
  1808. ref_leaf_bh, meta_ac,
  1809. dealloc);
  1810. if (ret)
  1811. mlog_errno(ret);
  1812. }
  1813. out:
  1814. return ret;
  1815. }
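/*
 * Decrease the refcount of the clusters in [cpos, cpos + len) record by
 * record.  If "delete" is set, clusters whose refcount drops to zero are
 * queued in "dealloc" for freeing.
 */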
  1816. static int __ocfs2_decrease_refcount(handle_t *handle,
  1817. struct ocfs2_caching_info *ci,
  1818. struct buffer_head *ref_root_bh,
  1819. u64 cpos, u32 len,
  1820. struct ocfs2_alloc_context *meta_ac,
  1821. struct ocfs2_cached_dealloc_ctxt *dealloc,
  1822. int delete)
  1823. {
  1824. int ret = 0, index = 0;
  1825. struct ocfs2_refcount_rec rec;
  1826. unsigned int r_count = 0, r_len;
  1827. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1828. struct buffer_head *ref_leaf_bh = NULL;
  1829. mlog(0, "Tree owner %llu, decrease refcount start %llu, "
  1830. "len %u, delete %u\n",
  1831. (unsigned long long)ocfs2_metadata_cache_owner(ci),
  1832. (unsigned long long)cpos, len, delete);
  1833. while (len) {
  1834. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1835. cpos, len, &rec, &index,
  1836. &ref_leaf_bh);
  1837. if (ret) {
  1838. mlog_errno(ret);
  1839. goto out;
  1840. }
  1841. r_count = le32_to_cpu(rec.r_refcount);
  1842. BUG_ON(r_count == 0);
  1843. if (!delete)
  1844. BUG_ON(r_count > 1);
  1845. r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
  1846. le32_to_cpu(rec.r_clusters)) - cpos;
  1847. ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh,
  1848. ref_leaf_bh, index,
  1849. cpos, r_len,
  1850. meta_ac, dealloc);
  1851. if (ret) {
  1852. mlog_errno(ret);
  1853. goto out;
  1854. }
  1855. if (le32_to_cpu(rec.r_refcount) == 1 && delete) {
  1856. ret = ocfs2_cache_cluster_dealloc(dealloc,
  1857. ocfs2_clusters_to_blocks(sb, cpos),
  1858. r_len);
  1859. if (ret) {
  1860. mlog_errno(ret);
  1861. goto out;
  1862. }
  1863. }
  1864. cpos += r_len;
  1865. len -= r_len;
  1866. brelse(ref_leaf_bh);
  1867. ref_leaf_bh = NULL;
  1868. }
  1869. out:
  1870. brelse(ref_leaf_bh);
  1871. return ret;
  1872. }
  1873. /* Caller must hold refcount tree lock. */
  1874. int ocfs2_decrease_refcount(struct inode *inode,
  1875. handle_t *handle, u32 cpos, u32 len,
  1876. struct ocfs2_alloc_context *meta_ac,
  1877. struct ocfs2_cached_dealloc_ctxt *dealloc,
  1878. int delete)
  1879. {
  1880. int ret;
  1881. u64 ref_blkno;
  1882. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  1883. struct buffer_head *ref_root_bh = NULL;
  1884. struct ocfs2_refcount_tree *tree;
  1885. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  1886. ret = ocfs2_get_refcount_block(inode, &ref_blkno);
  1887. if (ret) {
  1888. mlog_errno(ret);
  1889. goto out;
  1890. }
  1891. ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree);
  1892. if (ret) {
  1893. mlog_errno(ret);
  1894. goto out;
  1895. }
  1896. ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
  1897. &ref_root_bh);
  1898. if (ret) {
  1899. mlog_errno(ret);
  1900. goto out;
  1901. }
  1902. ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh,
  1903. cpos, len, meta_ac, dealloc, delete);
  1904. if (ret)
  1905. mlog_errno(ret);
  1906. out:
  1907. brelse(ref_root_bh);
  1908. return ret;
  1909. }
  1910. /*
  1911. * Mark the already-existing extent at cpos as refcounted for len clusters.
  1912. * This adds the refcount extent flag.
  1913. *
  1914. * If the existing extent is larger than the request, initiate a
  1915. * split. An attempt will be made at merging with adjacent extents.
  1916. *
  1917. * The caller is responsible for passing down meta_ac if we'll need it.
  1918. */
  1919. static int ocfs2_mark_extent_refcounted(struct inode *inode,
  1920. struct ocfs2_extent_tree *et,
  1921. handle_t *handle, u32 cpos,
  1922. u32 len, u32 phys,
  1923. struct ocfs2_alloc_context *meta_ac,
  1924. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1925. {
  1926. int ret;
  1927. mlog(0, "Inode %lu refcount tree cpos %u, len %u, phys cluster %u\n",
  1928. inode->i_ino, cpos, len, phys);
  1929. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
1930. ocfs2_error(inode->i_sb, "Inode %lu wants to use a refcount "
  1931. "tree, but the feature bit is not set in the "
  1932. "super block.", inode->i_ino);
  1933. ret = -EROFS;
  1934. goto out;
  1935. }
  1936. ret = ocfs2_change_extent_flag(handle, et, cpos,
  1937. len, phys, meta_ac, dealloc,
  1938. OCFS2_EXT_REFCOUNTED, 0);
  1939. if (ret)
  1940. mlog_errno(ret);
  1941. out:
  1942. return ret;
  1943. }
  1944. /*
  1945. * Given some contiguous physical clusters, calculate what we need
  1946. * for modifying their refcount.
  1947. */
  1948. static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
  1949. struct ocfs2_caching_info *ci,
  1950. struct buffer_head *ref_root_bh,
  1951. u64 start_cpos,
  1952. u32 clusters,
  1953. int *meta_add,
  1954. int *credits)
  1955. {
  1956. int ret = 0, index, ref_blocks = 0, recs_add = 0;
  1957. u64 cpos = start_cpos;
  1958. struct ocfs2_refcount_block *rb;
  1959. struct ocfs2_refcount_rec rec;
  1960. struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
  1961. u32 len;
  1962. mlog(0, "start_cpos %llu, clusters %u\n",
  1963. (unsigned long long)start_cpos, clusters);
  1964. while (clusters) {
  1965. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1966. cpos, clusters, &rec,
  1967. &index, &ref_leaf_bh);
  1968. if (ret) {
  1969. mlog_errno(ret);
  1970. goto out;
  1971. }
  1972. if (ref_leaf_bh != prev_bh) {
  1973. /*
  1974. * Now we encounter a new leaf block, so calculate
  1975. * whether we need to extend the old leaf.
  1976. */
  1977. if (prev_bh) {
  1978. rb = (struct ocfs2_refcount_block *)
  1979. prev_bh->b_data;
1980. if (le16_to_cpu(rb->rf_records.rl_used) +
  1981. recs_add >
  1982. le16_to_cpu(rb->rf_records.rl_count))
  1983. ref_blocks++;
  1984. }
  1985. recs_add = 0;
  1986. *credits += 1;
  1987. brelse(prev_bh);
  1988. prev_bh = ref_leaf_bh;
  1989. get_bh(prev_bh);
  1990. }
  1991. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1992. mlog(0, "recs_add %d,cpos %llu, clusters %u, rec->r_cpos %llu,"
  1993. "rec->r_clusters %u, rec->r_refcount %u, index %d\n",
  1994. recs_add, (unsigned long long)cpos, clusters,
  1995. (unsigned long long)le64_to_cpu(rec.r_cpos),
  1996. le32_to_cpu(rec.r_clusters),
  1997. le32_to_cpu(rec.r_refcount), index);
  1998. len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
  1999. le32_to_cpu(rec.r_clusters)) - cpos;
  2000. /*
2001. * If the refcount rec already exists, cool. We just need
2002. * to check whether there is a split. Otherwise we just need
2003. * to increase the refcount.
2004. * If we will insert one, increase recs_add.
  2005. *
  2006. * We record all the records which will be inserted to the
  2007. * same refcount block, so that we can tell exactly whether
  2008. * we need a new refcount block or not.
  2009. */
  2010. if (rec.r_refcount) {
  2011. /* Check whether we need a split at the beginning. */
  2012. if (cpos == start_cpos &&
  2013. cpos != le64_to_cpu(rec.r_cpos))
  2014. recs_add++;
  2015. /* Check whether we need a split in the end. */
  2016. if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
  2017. le32_to_cpu(rec.r_clusters))
  2018. recs_add++;
  2019. } else
  2020. recs_add++;
  2021. brelse(ref_leaf_bh);
  2022. ref_leaf_bh = NULL;
  2023. clusters -= len;
  2024. cpos += len;
  2025. }
  2026. if (prev_bh) {
  2027. rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
2028. if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
  2029. le16_to_cpu(rb->rf_records.rl_count))
  2030. ref_blocks++;
  2031. *credits += 1;
  2032. }
  2033. if (!ref_blocks)
  2034. goto out;
  2035. mlog(0, "we need ref_blocks %d\n", ref_blocks);
  2036. *meta_add += ref_blocks;
  2037. *credits += ref_blocks;
  2038. /*
  2039. * So we may need ref_blocks to insert into the tree.
  2040. * That also means we need to change the b-tree and add that number
  2041. * of records since we never merge them.
2042. * We need one more block for expansion since the newly created leaf
2043. * block is also full and needs a split.
  2044. */
  2045. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  2046. if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
  2047. struct ocfs2_extent_tree et;
  2048. ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
  2049. *meta_add += ocfs2_extend_meta_needed(et.et_root_el);
  2050. *credits += ocfs2_calc_extend_credits(sb,
  2051. et.et_root_el,
  2052. ref_blocks);
  2053. } else {
  2054. *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
  2055. *meta_add += 1;
  2056. }
  2057. out:
  2058. brelse(ref_leaf_bh);
  2059. brelse(prev_bh);
  2060. return ret;
  2061. }
  2062. /*
2063. * For a refcount tree, we will decrease the refcount of some
2064. * contiguous clusters, so just go through it to see how many blocks
2065. * we are going to touch and whether we need to create new blocks.
2066. *
2067. * Normally the refcount blocks that store these refcounts should be
2068. * contiguous as well, so we can get the number easily.
2069. * As for meta_ac, we will at most split 2 refcount records and add
2070. * 2 more refcount blocks, so just check it in a rough way.
  2071. *
  2072. * Caller must hold refcount tree lock.
  2073. */
  2074. int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
  2075. struct buffer_head *di_bh,
  2076. u64 phys_blkno,
  2077. u32 clusters,
  2078. int *credits,
  2079. struct ocfs2_alloc_context **meta_ac)
  2080. {
  2081. int ret, ref_blocks = 0;
  2082. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  2083. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  2084. struct buffer_head *ref_root_bh = NULL;
  2085. struct ocfs2_refcount_tree *tree;
  2086. u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);
  2087. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2088. ocfs2_error(inode->i_sb, "Inode %lu wants to use a refcount "
  2089. "tree, but the feature bit is not set in the "
  2090. "super block.", inode->i_ino);
  2091. ret = -EROFS;
  2092. goto out;
  2093. }
  2094. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  2095. ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
  2096. le64_to_cpu(di->i_refcount_loc), &tree);
  2097. if (ret) {
  2098. mlog_errno(ret);
  2099. goto out;
  2100. }
  2101. ret = ocfs2_read_refcount_block(&tree->rf_ci,
  2102. le64_to_cpu(di->i_refcount_loc),
  2103. &ref_root_bh);
  2104. if (ret) {
  2105. mlog_errno(ret);
  2106. goto out;
  2107. }
  2108. ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
  2109. &tree->rf_ci,
  2110. ref_root_bh,
  2111. start_cpos, clusters,
  2112. &ref_blocks, credits);
  2113. if (ret) {
  2114. mlog_errno(ret);
  2115. goto out;
  2116. }
  2117. mlog(0, "reserve new metadata %d, credits = %d\n",
  2118. ref_blocks, *credits);
  2119. if (ref_blocks) {
  2120. ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
  2121. ref_blocks, meta_ac);
  2122. if (ret)
  2123. mlog_errno(ret);
  2124. }
  2125. out:
  2126. brelse(ref_root_bh);
  2127. return ret;
  2128. }
  2129. #define MAX_CONTIG_BYTES 1048576
  2130. static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb)
  2131. {
  2132. return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES);
  2133. }
  2134. static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb)
  2135. {
  2136. return ~(ocfs2_cow_contig_clusters(sb) - 1);
  2137. }
  2138. /*
  2139. * Given an extent that starts at 'start' and an I/O that starts at 'cpos',
  2140. * find an offset (start + (n * contig_clusters)) that is closest to cpos
  2141. * while still being less than or equal to it.
  2142. *
  2143. * The goal is to break the extent at a multiple of contig_clusters.
  2144. */
  2145. static inline unsigned int ocfs2_cow_align_start(struct super_block *sb,
  2146. unsigned int start,
  2147. unsigned int cpos)
  2148. {
  2149. BUG_ON(start > cpos);
  2150. return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
  2151. }
  2152. /*
  2153. * Given a cluster count of len, pad it out so that it is a multiple
  2154. * of contig_clusters.
  2155. */
  2156. static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
  2157. unsigned int len)
  2158. {
  2159. unsigned int padded =
  2160. (len + (ocfs2_cow_contig_clusters(sb) - 1)) &
  2161. ocfs2_cow_contig_mask(sb);
  2162. /* Did we wrap? */
  2163. if (padded < len)
  2164. padded = UINT_MAX;
  2165. return padded;
  2166. }
  2167. /*
2168. * Calculate the start and the number of virtual clusters we need to CoW.
2169. *
2170. * cpos is the virtual start cluster position at which we want to do CoW
2171. * in a file and write_len is the cluster length.
2172. * max_cpos is the place where we want to stop CoW intentionally.
2173. *
2174. * Normally we will start CoW from the beginning of the extent record containing cpos.
  2175. * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we
  2176. * get good I/O from the resulting extent tree.
  2177. */
  2178. static int ocfs2_refcount_cal_cow_clusters(struct inode *inode,
  2179. struct ocfs2_extent_list *el,
  2180. u32 cpos,
  2181. u32 write_len,
  2182. u32 max_cpos,
  2183. u32 *cow_start,
  2184. u32 *cow_len)
  2185. {
  2186. int ret = 0;
  2187. int tree_height = le16_to_cpu(el->l_tree_depth), i;
  2188. struct buffer_head *eb_bh = NULL;
  2189. struct ocfs2_extent_block *eb = NULL;
  2190. struct ocfs2_extent_rec *rec;
  2191. unsigned int want_clusters, rec_end = 0;
  2192. int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb);
  2193. int leaf_clusters;
  2194. BUG_ON(cpos + write_len > max_cpos);
  2195. if (tree_height > 0) {
  2196. ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
  2197. if (ret) {
  2198. mlog_errno(ret);
  2199. goto out;
  2200. }
  2201. eb = (struct ocfs2_extent_block *) eb_bh->b_data;
  2202. el = &eb->h_list;
  2203. if (el->l_tree_depth) {
  2204. ocfs2_error(inode->i_sb,
  2205. "Inode %lu has non zero tree depth in "
  2206. "leaf block %llu\n", inode->i_ino,
  2207. (unsigned long long)eb_bh->b_blocknr);
  2208. ret = -EROFS;
  2209. goto out;
  2210. }
  2211. }
  2212. *cow_len = 0;
  2213. for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
  2214. rec = &el->l_recs[i];
  2215. if (ocfs2_is_empty_extent(rec)) {
  2216. mlog_bug_on_msg(i != 0, "Inode %lu has empty record in "
  2217. "index %d\n", inode->i_ino, i);
  2218. continue;
  2219. }
  2220. if (le32_to_cpu(rec->e_cpos) +
  2221. le16_to_cpu(rec->e_leaf_clusters) <= cpos)
  2222. continue;
  2223. if (*cow_len == 0) {
  2224. /*
  2225. * We should find a refcounted record in the
  2226. * first pass.
  2227. */
  2228. BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED));
  2229. *cow_start = le32_to_cpu(rec->e_cpos);
  2230. }
  2231. /*
  2232. * If we encounter a hole, a non-refcounted record or
  2233. * pass the max_cpos, stop the search.
  2234. */
  2235. if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) ||
  2236. (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) ||
  2237. (max_cpos <= le32_to_cpu(rec->e_cpos)))
  2238. break;
  2239. leaf_clusters = le16_to_cpu(rec->e_leaf_clusters);
  2240. rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters;
  2241. if (rec_end > max_cpos) {
  2242. rec_end = max_cpos;
  2243. leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos);
  2244. }
  2245. /*
  2246. * How many clusters do we actually need from
  2247. * this extent? First we see how many we actually
  2248. * need to complete the write. If that's smaller
  2249. * than contig_clusters, we try for contig_clusters.
  2250. */
  2251. if (!*cow_len)
  2252. want_clusters = write_len;
  2253. else
  2254. want_clusters = (cpos + write_len) -
  2255. (*cow_start + *cow_len);
  2256. if (want_clusters < contig_clusters)
  2257. want_clusters = contig_clusters;
  2258. /*
  2259. * If the write does not cover the whole extent, we
  2260. * need to calculate how we're going to split the extent.
  2261. * We try to do it on contig_clusters boundaries.
  2262. *
  2263. * Any extent smaller than contig_clusters will be
  2264. * CoWed in its entirety.
  2265. */
  2266. if (leaf_clusters <= contig_clusters)
  2267. *cow_len += leaf_clusters;
  2268. else if (*cow_len || (*cow_start == cpos)) {
  2269. /*
  2270. * This extent needs to be CoW'd from its
  2271. * beginning, so all we have to do is compute
  2272. * how many clusters to grab. We align
  2273. * want_clusters to the edge of contig_clusters
  2274. * to get better I/O.
  2275. */
  2276. want_clusters = ocfs2_cow_align_length(inode->i_sb,
  2277. want_clusters);
  2278. if (leaf_clusters < want_clusters)
  2279. *cow_len += leaf_clusters;
  2280. else
  2281. *cow_len += want_clusters;
  2282. } else if ((*cow_start + contig_clusters) >=
  2283. (cpos + write_len)) {
  2284. /*
  2285. * Breaking off contig_clusters at the front
  2286. * of the extent will cover our write. That's
  2287. * easy.
  2288. */
  2289. *cow_len = contig_clusters;
  2290. } else if ((rec_end - cpos) <= contig_clusters) {
  2291. /*
  2292. * Breaking off contig_clusters at the tail of
  2293. * this extent will cover cpos.
  2294. */
  2295. *cow_start = rec_end - contig_clusters;
  2296. *cow_len = contig_clusters;
  2297. } else if ((rec_end - cpos) <= want_clusters) {
  2298. /*
  2299. * While we can't fit the entire write in this
  2300. * extent, we know that the write goes from cpos
  2301. * to the end of the extent. Break that off.
  2302. * We try to break it at some multiple of
  2303. * contig_clusters from the front of the extent.
  2304. * Failing that (ie, cpos is within
  2305. * contig_clusters of the front), we'll CoW the
  2306. * entire extent.
  2307. */
  2308. *cow_start = ocfs2_cow_align_start(inode->i_sb,
  2309. *cow_start, cpos);
  2310. *cow_len = rec_end - *cow_start;
  2311. } else {
  2312. /*
  2313. * Ok, the entire write lives in the middle of
  2314. * this extent. Let's try to slice the extent up
  2315. * nicely. Optimally, our CoW region starts at
  2316. * m*contig_clusters from the beginning of the
  2317. * extent and goes for n*contig_clusters,
  2318. * covering the entire write.
  2319. */
  2320. *cow_start = ocfs2_cow_align_start(inode->i_sb,
  2321. *cow_start, cpos);
  2322. want_clusters = (cpos + write_len) - *cow_start;
  2323. want_clusters = ocfs2_cow_align_length(inode->i_sb,
  2324. want_clusters);
  2325. if (*cow_start + want_clusters <= rec_end)
  2326. *cow_len = want_clusters;
  2327. else
  2328. *cow_len = rec_end - *cow_start;
  2329. }
  2330. /* Have we covered our entire write yet? */
  2331. if ((*cow_start + *cow_len) >= (cpos + write_len))
  2332. break;
  2333. /*
  2334. * If we reach the end of the extent block and don't get enough
  2335. * clusters, continue with the next extent block if possible.
  2336. */
  2337. if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
  2338. eb && eb->h_next_leaf_blk) {
  2339. brelse(eb_bh);
  2340. eb_bh = NULL;
  2341. ret = ocfs2_read_extent_block(INODE_CACHE(inode),
  2342. le64_to_cpu(eb->h_next_leaf_blk),
  2343. &eb_bh);
  2344. if (ret) {
  2345. mlog_errno(ret);
  2346. goto out;
  2347. }
  2348. eb = (struct ocfs2_extent_block *) eb_bh->b_data;
  2349. el = &eb->h_list;
  2350. i = -1;
  2351. }
  2352. }
  2353. out:
  2354. brelse(eb_bh);
  2355. return ret;
  2356. }
  2357. /*
  2358. * Prepare meta_ac, data_ac and calculate credits when we want to add some
  2359. * num_clusters in data_tree "et" and change the refcount for the old
2360. * clusters (starting from p_cluster) in the refcount tree.
2361. *
2362. * Note:
2363. * 1. Since we may split the old tree, we will at most need num_clusters + 2
2364. * more new leaf records.
2365. * 2. In some cases, we may not need to reserve new clusters (e.g. reflink),
2366. * so just pass data_ac = NULL.
  2367. */
  2368. static int ocfs2_lock_refcount_allocators(struct super_block *sb,
  2369. u32 p_cluster, u32 num_clusters,
  2370. struct ocfs2_extent_tree *et,
  2371. struct ocfs2_caching_info *ref_ci,
  2372. struct buffer_head *ref_root_bh,
  2373. struct ocfs2_alloc_context **meta_ac,
  2374. struct ocfs2_alloc_context **data_ac,
  2375. int *credits)
  2376. {
  2377. int ret = 0, meta_add = 0;
  2378. int num_free_extents = ocfs2_num_free_extents(OCFS2_SB(sb), et);
  2379. if (num_free_extents < 0) {
  2380. ret = num_free_extents;
  2381. mlog_errno(ret);
  2382. goto out;
  2383. }
  2384. if (num_free_extents < num_clusters + 2)
  2385. meta_add =
  2386. ocfs2_extend_meta_needed(et->et_root_el);
  2387. *credits += ocfs2_calc_extend_credits(sb, et->et_root_el,
  2388. num_clusters + 2);
  2389. ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
  2390. p_cluster, num_clusters,
  2391. &meta_add, credits);
  2392. if (ret) {
  2393. mlog_errno(ret);
  2394. goto out;
  2395. }
  2396. mlog(0, "reserve new metadata %d, clusters %u, credits = %d\n",
  2397. meta_add, num_clusters, *credits);
  2398. ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
  2399. meta_ac);
  2400. if (ret) {
  2401. mlog_errno(ret);
  2402. goto out;
  2403. }
  2404. if (data_ac) {
  2405. ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
  2406. data_ac);
  2407. if (ret)
  2408. mlog_errno(ret);
  2409. }
  2410. out:
  2411. if (ret) {
  2412. if (*meta_ac) {
  2413. ocfs2_free_alloc_context(*meta_ac);
  2414. *meta_ac = NULL;
  2415. }
  2416. }
  2417. return ret;
  2418. }
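/*
 * walk_page_buffers() callback used during CoW: the page must not be
 * dirty yet, and clearing the mapped bit should make the buffers get
 * re-mapped to the newly allocated blocks when the page is dirtied.
 */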
  2419. static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
  2420. {
  2421. BUG_ON(buffer_dirty(bh));
  2422. clear_buffer_mapped(bh);
  2423. return 0;
  2424. }
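/*
 * Copy the data of the old clusters to the new ones through the page
 * cache: read each page in the range if needed, drop the old buffer
 * mappings, then map and dirty the page against the new physical blocks.
 */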
  2425. static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
  2426. struct ocfs2_cow_context *context,
  2427. u32 cpos, u32 old_cluster,
  2428. u32 new_cluster, u32 new_len)
  2429. {
  2430. int ret = 0, partial;
  2431. struct ocfs2_caching_info *ci = context->data_et.et_ci;
  2432. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  2433. u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
  2434. struct page *page;
  2435. pgoff_t page_index;
  2436. unsigned int from, to;
  2437. loff_t offset, end, map_end;
  2438. struct address_space *mapping = context->inode->i_mapping;
  2439. mlog(0, "old_cluster %u, new %u, len %u at offset %u\n", old_cluster,
  2440. new_cluster, new_len, cpos);
  2441. offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
  2442. end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
  2443. while (offset < end) {
  2444. page_index = offset >> PAGE_CACHE_SHIFT;
  2445. map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
  2446. if (map_end > end)
  2447. map_end = end;
  2448. /* from, to is the offset within the page. */
  2449. from = offset & (PAGE_CACHE_SIZE - 1);
  2450. to = PAGE_CACHE_SIZE;
  2451. if (map_end & (PAGE_CACHE_SIZE - 1))
  2452. to = map_end & (PAGE_CACHE_SIZE - 1);
  2453. page = grab_cache_page(mapping, page_index);
  2454. /* This page can't be dirtied before we CoW it out. */
  2455. BUG_ON(PageDirty(page));
  2456. if (!PageUptodate(page)) {
  2457. ret = block_read_full_page(page, ocfs2_get_block);
  2458. if (ret) {
  2459. mlog_errno(ret);
  2460. goto unlock;
  2461. }
  2462. lock_page(page);
  2463. }
  2464. if (page_has_buffers(page)) {
  2465. ret = walk_page_buffers(handle, page_buffers(page),
  2466. from, to, &partial,
  2467. ocfs2_clear_cow_buffer);
  2468. if (ret) {
  2469. mlog_errno(ret);
  2470. goto unlock;
  2471. }
  2472. }
  2473. ocfs2_map_and_dirty_page(context->inode,
  2474. handle, from, to,
  2475. page, 0, &new_block);
  2476. mark_page_accessed(page);
  2477. unlock:
  2478. unlock_page(page);
  2479. page_cache_release(page);
  2480. page = NULL;
  2481. offset = map_end;
  2482. if (ret)
  2483. break;
  2484. }
  2485. return ret;
  2486. }
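/*
 * Copy the old clusters to the new ones block by block, journalling the
 * new blocks in the running transaction instead of going through the
 * page cache.
 */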
  2487. static int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
  2488. struct ocfs2_cow_context *context,
  2489. u32 cpos, u32 old_cluster,
  2490. u32 new_cluster, u32 new_len)
  2491. {
  2492. int ret = 0;
  2493. struct super_block *sb = context->inode->i_sb;
  2494. struct ocfs2_caching_info *ci = context->data_et.et_ci;
  2495. int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
  2496. u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster);
  2497. u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
  2498. struct ocfs2_super *osb = OCFS2_SB(sb);
  2499. struct buffer_head *old_bh = NULL;
  2500. struct buffer_head *new_bh = NULL;
  2501. mlog(0, "old_cluster %u, new %u, len %u\n", old_cluster,
  2502. new_cluster, new_len);
  2503. for (i = 0; i < blocks; i++, old_block++, new_block++) {
  2504. new_bh = sb_getblk(osb->sb, new_block);
  2505. if (new_bh == NULL) {
  2506. ret = -EIO;
  2507. mlog_errno(ret);
  2508. break;
  2509. }
  2510. ocfs2_set_new_buffer_uptodate(ci, new_bh);
  2511. ret = ocfs2_read_block(ci, old_block, &old_bh, NULL);
  2512. if (ret) {
  2513. mlog_errno(ret);
  2514. break;
  2515. }
  2516. ret = ocfs2_journal_access(handle, ci, new_bh,
  2517. OCFS2_JOURNAL_ACCESS_CREATE);
  2518. if (ret) {
  2519. mlog_errno(ret);
  2520. break;
  2521. }
  2522. memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize);
  2523. ret = ocfs2_journal_dirty(handle, new_bh);
  2524. if (ret) {
  2525. mlog_errno(ret);
  2526. break;
  2527. }
  2528. brelse(new_bh);
  2529. brelse(old_bh);
  2530. new_bh = NULL;
  2531. old_bh = NULL;
  2532. }
  2533. brelse(new_bh);
  2534. brelse(old_bh);
  2535. return ret;
  2536. }
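/*
 * Rewrite the extent at cpos so that it points at p_cluster with the
 * given flags minus OCFS2_EXT_REFCOUNTED, splitting the existing extent
 * record if necessary.
 */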
  2537. static int ocfs2_clear_ext_refcount(handle_t *handle,
  2538. struct ocfs2_extent_tree *et,
  2539. u32 cpos, u32 p_cluster, u32 len,
  2540. unsigned int ext_flags,
  2541. struct ocfs2_alloc_context *meta_ac,
  2542. struct ocfs2_cached_dealloc_ctxt *dealloc)
  2543. {
  2544. int ret, index;
  2545. struct ocfs2_extent_rec replace_rec;
  2546. struct ocfs2_path *path = NULL;
  2547. struct ocfs2_extent_list *el;
  2548. struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
  2549. u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
  2550. mlog(0, "inode %llu cpos %u, len %u, p_cluster %u, ext_flags %u\n",
  2551. (unsigned long long)ino, cpos, len, p_cluster, ext_flags);
  2552. memset(&replace_rec, 0, sizeof(replace_rec));
  2553. replace_rec.e_cpos = cpu_to_le32(cpos);
  2554. replace_rec.e_leaf_clusters = cpu_to_le16(len);
  2555. replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb,
  2556. p_cluster));
  2557. replace_rec.e_flags = ext_flags;
  2558. replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED;
  2559. path = ocfs2_new_path_from_et(et);
  2560. if (!path) {
  2561. ret = -ENOMEM;
  2562. mlog_errno(ret);
  2563. goto out;
  2564. }
  2565. ret = ocfs2_find_path(et->et_ci, path, cpos);
  2566. if (ret) {
  2567. mlog_errno(ret);
  2568. goto out;
  2569. }
  2570. el = path_leaf_el(path);
  2571. index = ocfs2_search_extent_list(el, cpos);
  2572. if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
  2573. ocfs2_error(sb,
  2574. "Inode %llu has an extent at cpos %u which can no "
  2575. "longer be found.\n",
  2576. (unsigned long long)ino, cpos);
  2577. ret = -EROFS;
  2578. goto out;
  2579. }
  2580. ret = ocfs2_split_extent(handle, et, path, index,
  2581. &replace_rec, meta_ac, dealloc);
  2582. if (ret)
  2583. mlog_errno(ret);
  2584. out:
  2585. ocfs2_free_path(path);
  2586. return ret;
  2587. }
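/*
 * Replace the old clusters at cpos with the newly allocated ones:
 * duplicate the data first (unless the extent is unwritten), then clear
 * the refcount flag and point the extent at the new clusters.
 */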
  2588. static int ocfs2_replace_clusters(handle_t *handle,
  2589. struct ocfs2_cow_context *context,
  2590. u32 cpos, u32 old,
  2591. u32 new, u32 len,
  2592. unsigned int ext_flags)
  2593. {
  2594. int ret;
  2595. struct ocfs2_caching_info *ci = context->data_et.et_ci;
  2596. u64 ino = ocfs2_metadata_cache_owner(ci);
  2597. mlog(0, "inode %llu, cpos %u, old %u, new %u, len %u, ext_flags %u\n",
  2598. (unsigned long long)ino, cpos, old, new, len, ext_flags);
2599. /* If the old clusters are unwritten, there is no need to duplicate. */
  2600. if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
  2601. ret = context->cow_duplicate_clusters(handle, context, cpos,
  2602. old, new, len);
  2603. if (ret) {
  2604. mlog_errno(ret);
  2605. goto out;
  2606. }
  2607. }
  2608. ret = ocfs2_clear_ext_refcount(handle, &context->data_et,
  2609. cpos, new, len, ext_flags,
  2610. context->meta_ac, &context->dealloc);
  2611. if (ret)
  2612. mlog_errno(ret);
  2613. out:
  2614. return ret;
  2615. }
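/*
 * Write out and wait on the CoWed pages.  Nothing to do in data=ordered
 * mode, where (presumably) the journal already orders the data writes;
 * only the writeback case needs this explicit flush.
 */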
  2616. static int ocfs2_cow_sync_writeback(struct super_block *sb,
  2617. struct ocfs2_cow_context *context,
  2618. u32 cpos, u32 num_clusters)
  2619. {
  2620. int ret = 0;
  2621. loff_t offset, end, map_end;
  2622. pgoff_t page_index;
  2623. struct page *page;
  2624. if (ocfs2_should_order_data(context->inode))
  2625. return 0;
  2626. offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
  2627. end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);
  2628. ret = filemap_fdatawrite_range(context->inode->i_mapping,
  2629. offset, end - 1);
  2630. if (ret < 0) {
  2631. mlog_errno(ret);
  2632. return ret;
  2633. }
  2634. while (offset < end) {
  2635. page_index = offset >> PAGE_CACHE_SHIFT;
  2636. map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
  2637. if (map_end > end)
  2638. map_end = end;
  2639. page = grab_cache_page(context->inode->i_mapping, page_index);
  2640. BUG_ON(!page);
  2641. wait_on_page_writeback(page);
  2642. if (PageError(page)) {
  2643. ret = -EIO;
  2644. mlog_errno(ret);
  2645. } else
  2646. mark_page_accessed(page);
  2647. unlock_page(page);
  2648. page_cache_release(page);
  2649. page = NULL;
  2650. offset = map_end;
  2651. if (ret)
  2652. break;
  2653. }
  2654. return ret;
  2655. }
  2656. static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context,
  2657. u32 v_cluster, u32 *p_cluster,
  2658. u32 *num_clusters,
  2659. unsigned int *extent_flags)
  2660. {
  2661. return ocfs2_get_clusters(context->inode, v_cluster, p_cluster,
  2662. num_clusters, extent_flags);
  2663. }
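/*
 * CoW one contiguous chunk: reserve the allocators, then for each
 * refcount rec covering it either just clear the refcount flag
 * (refcount == 1) or claim new clusters, copy the data over and repoint
 * the extent, decreasing the old refcount either way.
 */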
  2664. static int ocfs2_make_clusters_writable(struct super_block *sb,
  2665. struct ocfs2_cow_context *context,
  2666. u32 cpos, u32 p_cluster,
  2667. u32 num_clusters, unsigned int e_flags)
  2668. {
  2669. int ret, delete, index, credits = 0;
  2670. u32 new_bit, new_len;
  2671. unsigned int set_len;
  2672. struct ocfs2_super *osb = OCFS2_SB(sb);
  2673. handle_t *handle;
  2674. struct buffer_head *ref_leaf_bh = NULL;
  2675. struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
  2676. struct ocfs2_refcount_rec rec;
  2677. mlog(0, "cpos %u, p_cluster %u, num_clusters %u, e_flags %u\n",
  2678. cpos, p_cluster, num_clusters, e_flags);
  2679. ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
  2680. &context->data_et,
  2681. ref_ci,
  2682. context->ref_root_bh,
  2683. &context->meta_ac,
  2684. &context->data_ac, &credits);
  2685. if (ret) {
  2686. mlog_errno(ret);
  2687. return ret;
  2688. }
  2689. if (context->post_refcount)
  2690. credits += context->post_refcount->credits;
  2691. credits += context->extra_credits;
  2692. handle = ocfs2_start_trans(osb, credits);
  2693. if (IS_ERR(handle)) {
  2694. ret = PTR_ERR(handle);
  2695. mlog_errno(ret);
  2696. goto out;
  2697. }
  2698. while (num_clusters) {
  2699. ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
  2700. p_cluster, num_clusters,
  2701. &rec, &index, &ref_leaf_bh);
  2702. if (ret) {
  2703. mlog_errno(ret);
  2704. goto out_commit;
  2705. }
  2706. BUG_ON(!rec.r_refcount);
  2707. set_len = min((u64)p_cluster + num_clusters,
  2708. le64_to_cpu(rec.r_cpos) +
  2709. le32_to_cpu(rec.r_clusters)) - p_cluster;
  2710. /*
2711. * There are several different situations here.
2712. * 1. If refcount == 1, remove the flag and don't CoW.
2713. * 2. If refcount > 1, allocate clusters.
2714. * Here we may not be able to allocate r_len clusters at once, so
2715. * continue until we reach num_clusters.
  2716. */
  2717. if (le32_to_cpu(rec.r_refcount) == 1) {
  2718. delete = 0;
  2719. ret = ocfs2_clear_ext_refcount(handle,
  2720. &context->data_et,
  2721. cpos, p_cluster,
  2722. set_len, e_flags,
  2723. context->meta_ac,
  2724. &context->dealloc);
  2725. if (ret) {
  2726. mlog_errno(ret);
  2727. goto out_commit;
  2728. }
  2729. } else {
  2730. delete = 1;
  2731. ret = __ocfs2_claim_clusters(osb, handle,
  2732. context->data_ac,
  2733. 1, set_len,
  2734. &new_bit, &new_len);
  2735. if (ret) {
  2736. mlog_errno(ret);
  2737. goto out_commit;
  2738. }
  2739. ret = ocfs2_replace_clusters(handle, context,
  2740. cpos, p_cluster, new_bit,
  2741. new_len, e_flags);
  2742. if (ret) {
  2743. mlog_errno(ret);
  2744. goto out_commit;
  2745. }
  2746. set_len = new_len;
  2747. }
  2748. ret = __ocfs2_decrease_refcount(handle, ref_ci,
  2749. context->ref_root_bh,
  2750. p_cluster, set_len,
  2751. context->meta_ac,
  2752. &context->dealloc, delete);
  2753. if (ret) {
  2754. mlog_errno(ret);
  2755. goto out_commit;
  2756. }
  2757. cpos += set_len;
  2758. p_cluster += set_len;
  2759. num_clusters -= set_len;
  2760. brelse(ref_leaf_bh);
  2761. ref_leaf_bh = NULL;
  2762. }
  2763. /* handle any post_cow action. */
  2764. if (context->post_refcount && context->post_refcount->func) {
  2765. ret = context->post_refcount->func(context->inode, handle,
  2766. context->post_refcount->para);
  2767. if (ret) {
  2768. mlog_errno(ret);
  2769. goto out_commit;
  2770. }
  2771. }
  2772. /*
  2773. * Here we should write the new page out first if we are
  2774. * in write-back mode.
  2775. */
  2776. if (context->get_clusters == ocfs2_di_get_clusters) {
  2777. ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
  2778. if (ret)
  2779. mlog_errno(ret);
  2780. }
  2781. out_commit:
  2782. ocfs2_commit_trans(osb, handle);
  2783. out:
  2784. if (context->data_ac) {
  2785. ocfs2_free_alloc_context(context->data_ac);
  2786. context->data_ac = NULL;
  2787. }
  2788. if (context->meta_ac) {
  2789. ocfs2_free_alloc_context(context->meta_ac);
  2790. context->meta_ac = NULL;
  2791. }
  2792. brelse(ref_leaf_bh);
  2793. return ret;
  2794. }
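/*
 * Walk the CoW range described by the context, calling
 * ocfs2_make_clusters_writable() on each refcounted extent, and then run
 * any deallocations that were queued along the way.
 */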
  2795. static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
  2796. {
  2797. int ret = 0;
  2798. struct inode *inode = context->inode;
  2799. u32 cow_start = context->cow_start, cow_len = context->cow_len;
  2800. u32 p_cluster, num_clusters;
  2801. unsigned int ext_flags;
  2802. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  2803. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2804. ocfs2_error(inode->i_sb, "Inode %lu wants to use a refcount "
2805. "tree, but the feature bit is not set in the "
2806. "superblock.", inode->i_ino);
  2807. return -EROFS;
  2808. }
  2809. ocfs2_init_dealloc_ctxt(&context->dealloc);
  2810. while (cow_len) {
  2811. ret = context->get_clusters(context, cow_start, &p_cluster,
  2812. &num_clusters, &ext_flags);
  2813. if (ret) {
  2814. mlog_errno(ret);
  2815. break;
  2816. }
  2817. BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED));
  2818. if (cow_len < num_clusters)
  2819. num_clusters = cow_len;
  2820. ret = ocfs2_make_clusters_writable(inode->i_sb, context,
  2821. cow_start, p_cluster,
  2822. num_clusters, ext_flags);
  2823. if (ret) {
  2824. mlog_errno(ret);
  2825. break;
  2826. }
  2827. cow_len -= num_clusters;
  2828. cow_start += num_clusters;
  2829. }
  2830. if (ocfs2_dealloc_has_cluster(&context->dealloc)) {
  2831. ocfs2_schedule_truncate_log_flush(osb, 1);
  2832. ocfs2_run_deallocs(osb, &context->dealloc);
  2833. }
  2834. return ret;
  2835. }
  2836. /*
  2837. * Starting at cpos, try to CoW write_len clusters. Don't CoW
  2838. * past max_cpos. This will stop when it runs into a hole or an
  2839. * unrefcounted extent.
  2840. */
  2841. static int ocfs2_refcount_cow_hunk(struct inode *inode,
  2842. struct buffer_head *di_bh,
  2843. u32 cpos, u32 write_len, u32 max_cpos)
  2844. {
  2845. int ret;
  2846. u32 cow_start = 0, cow_len = 0;
  2847. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  2848. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  2849. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  2850. struct buffer_head *ref_root_bh = NULL;
  2851. struct ocfs2_refcount_tree *ref_tree;
  2852. struct ocfs2_cow_context *context = NULL;
  2853. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  2854. ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list,
  2855. cpos, write_len, max_cpos,
  2856. &cow_start, &cow_len);
  2857. if (ret) {
  2858. mlog_errno(ret);
  2859. goto out;
  2860. }
  2861. mlog(0, "CoW inode %lu, cpos %u, write_len %u, cow_start %u, "
  2862. "cow_len %u\n", inode->i_ino,
  2863. cpos, write_len, cow_start, cow_len);
  2864. BUG_ON(cow_len == 0);
  2865. context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
  2866. if (!context) {
  2867. ret = -ENOMEM;
  2868. mlog_errno(ret);
  2869. goto out;
  2870. }
  2871. ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
  2872. 1, &ref_tree, &ref_root_bh);
  2873. if (ret) {
  2874. mlog_errno(ret);
  2875. goto out;
  2876. }
  2877. context->inode = inode;
  2878. context->cow_start = cow_start;
  2879. context->cow_len = cow_len;
  2880. context->ref_tree = ref_tree;
  2881. context->ref_root_bh = ref_root_bh;
  2882. context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
  2883. context->get_clusters = ocfs2_di_get_clusters;
  2884. ocfs2_init_dinode_extent_tree(&context->data_et,
  2885. INODE_CACHE(inode), di_bh);
  2886. ret = ocfs2_replace_cow(context);
  2887. if (ret)
  2888. mlog_errno(ret);
  2889. /*
2890. * Truncate the extent map here: no matter whether we hit an error
2891. * during the operation, the cached extent map can no longer be
2892. * trusted.
  2893. */
  2894. ocfs2_extent_map_trunc(inode, cow_start);
  2895. ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
  2896. brelse(ref_root_bh);
  2897. out:
  2898. kfree(context);
  2899. return ret;
  2900. }
  2901. /*
  2902. * CoW any and all clusters between cpos and cpos+write_len.
  2903. * Don't CoW past max_cpos. If this returns successfully, all
  2904. * clusters between cpos and cpos+write_len are safe to modify.
  2905. */
  2906. int ocfs2_refcount_cow(struct inode *inode,
  2907. struct buffer_head *di_bh,
  2908. u32 cpos, u32 write_len, u32 max_cpos)
  2909. {
  2910. int ret = 0;
  2911. u32 p_cluster, num_clusters;
  2912. unsigned int ext_flags;
  2913. while (write_len) {
  2914. ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
  2915. &num_clusters, &ext_flags);
  2916. if (ret) {
  2917. mlog_errno(ret);
  2918. break;
  2919. }
  2920. if (write_len < num_clusters)
  2921. num_clusters = write_len;
  2922. if (ext_flags & OCFS2_EXT_REFCOUNTED) {
  2923. ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
  2924. num_clusters, max_cpos);
  2925. if (ret) {
  2926. mlog_errno(ret);
  2927. break;
  2928. }
  2929. }
  2930. write_len -= num_clusters;
  2931. cpos += num_clusters;
  2932. }
  2933. return ret;
  2934. }
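/*
 * Illustrative sketch (not code from this file): a caller that is about
 * to dirty a refcounted range would typically break the sharing first,
 * roughly:
 *
 *	ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
 *	if (ret)
 *		mlog_errno(ret);
 *
 * ocfs2_xattr_value_get_clusters() below is the xattr counterpart of
 * ocfs2_di_get_clusters(): it maps a virtual cluster inside an xattr
 * value root to its physical clusters and extent flags.
 */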
  2935. static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context,
  2936. u32 v_cluster, u32 *p_cluster,
  2937. u32 *num_clusters,
  2938. unsigned int *extent_flags)
  2939. {
  2940. struct inode *inode = context->inode;
  2941. struct ocfs2_xattr_value_root *xv = context->cow_object;
  2942. return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster,
  2943. num_clusters, &xv->xr_list,
  2944. extent_flags);
  2945. }
  2946. /*
2947. * Given an xattr value root, calculate the worst-case metadata blocks and
2948. * journal credits we need for the refcount tree change if we truncate it to 0.
  2949. */
  2950. int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
  2951. struct ocfs2_caching_info *ref_ci,
  2952. struct buffer_head *ref_root_bh,
  2953. struct ocfs2_xattr_value_root *xv,
  2954. int *meta_add, int *credits)
  2955. {
  2956. int ret = 0, index, ref_blocks = 0;
  2957. u32 p_cluster, num_clusters;
  2958. u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters);
  2959. struct ocfs2_refcount_block *rb;
  2960. struct ocfs2_refcount_rec rec;
  2961. struct buffer_head *ref_leaf_bh = NULL;
  2962. while (cpos < clusters) {
  2963. ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
  2964. &num_clusters, &xv->xr_list,
  2965. NULL);
  2966. if (ret) {
  2967. mlog_errno(ret);
  2968. goto out;
  2969. }
  2970. cpos += num_clusters;
  2971. while (num_clusters) {
  2972. ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh,
  2973. p_cluster, num_clusters,
  2974. &rec, &index,
  2975. &ref_leaf_bh);
  2976. if (ret) {
  2977. mlog_errno(ret);
  2978. goto out;
  2979. }
  2980. BUG_ON(!rec.r_refcount);
  2981. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  2982. /*
2983. * We don't really know whether the other clusters are in this
2984. * refcount block or not, so just take the worst case: all the
2985. * clusters are in this block and each one will split a refcount
2986. * rec, so in total we may need clusters * 2 new refcount
2987. * records.
  2988. */
2989. if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
  2990. le16_to_cpu(rb->rf_records.rl_count))
  2991. ref_blocks++;
  2992. *credits += 1;
  2993. brelse(ref_leaf_bh);
  2994. ref_leaf_bh = NULL;
  2995. if (num_clusters <= le32_to_cpu(rec.r_clusters))
  2996. break;
  2997. else
  2998. num_clusters -= le32_to_cpu(rec.r_clusters);
  2999. p_cluster += num_clusters;
  3000. }
  3001. }
  3002. *meta_add += ref_blocks;
  3003. if (!ref_blocks)
  3004. goto out;
  3005. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  3006. if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
  3007. *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
  3008. else {
  3009. struct ocfs2_extent_tree et;
  3010. ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh);
  3011. *credits += ocfs2_calc_extend_credits(inode->i_sb,
  3012. et.et_root_el,
  3013. ref_blocks);
  3014. }
  3015. out:
  3016. brelse(ref_leaf_bh);
  3017. return ret;
  3018. }
  3019. /*
  3020. * Do CoW for xattr.
  3021. */
  3022. int ocfs2_refcount_cow_xattr(struct inode *inode,
  3023. struct ocfs2_dinode *di,
  3024. struct ocfs2_xattr_value_buf *vb,
  3025. struct ocfs2_refcount_tree *ref_tree,
  3026. struct buffer_head *ref_root_bh,
  3027. u32 cpos, u32 write_len,
  3028. struct ocfs2_post_refcount *post)
  3029. {
  3030. int ret;
  3031. struct ocfs2_xattr_value_root *xv = vb->vb_xv;
  3032. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  3033. struct ocfs2_cow_context *context = NULL;
  3034. u32 cow_start, cow_len;
  3035. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  3036. ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list,
  3037. cpos, write_len, UINT_MAX,
  3038. &cow_start, &cow_len);
  3039. if (ret) {
  3040. mlog_errno(ret);
  3041. goto out;
  3042. }
  3043. BUG_ON(cow_len == 0);
  3044. context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
  3045. if (!context) {
  3046. ret = -ENOMEM;
  3047. mlog_errno(ret);
  3048. goto out;
  3049. }
  3050. context->inode = inode;
  3051. context->cow_start = cow_start;
  3052. context->cow_len = cow_len;
  3053. context->ref_tree = ref_tree;
3054. context->ref_root_bh = ref_root_bh;
  3055. context->cow_object = xv;
  3056. context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd;
  3057. /* We need the extra credits for duplicate_clusters by jbd. */
  3058. context->extra_credits =
  3059. ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len;
  3060. context->get_clusters = ocfs2_xattr_value_get_clusters;
  3061. context->post_refcount = post;
  3062. ocfs2_init_xattr_value_extent_tree(&context->data_et,
  3063. INODE_CACHE(inode), vb);
  3064. ret = ocfs2_replace_cow(context);
  3065. if (ret)
  3066. mlog_errno(ret);
  3067. out:
  3068. kfree(context);
  3069. return ret;
  3070. }
  3071. /*
3072. * Insert a new extent into the refcount tree and mark an extent rec
3073. * as refcounted in the dinode tree.
  3074. */
  3075. int ocfs2_add_refcount_flag(struct inode *inode,
  3076. struct ocfs2_extent_tree *data_et,
  3077. struct ocfs2_caching_info *ref_ci,
  3078. struct buffer_head *ref_root_bh,
  3079. u32 cpos, u32 p_cluster, u32 num_clusters,
  3080. struct ocfs2_cached_dealloc_ctxt *dealloc,
  3081. struct ocfs2_post_refcount *post)
  3082. {
  3083. int ret;
  3084. handle_t *handle;
  3085. int credits = 1, ref_blocks = 0;
  3086. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  3087. struct ocfs2_alloc_context *meta_ac = NULL;
  3088. ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
  3089. ref_ci, ref_root_bh,
  3090. p_cluster, num_clusters,
  3091. &ref_blocks, &credits);
  3092. if (ret) {
  3093. mlog_errno(ret);
  3094. goto out;
  3095. }
  3096. mlog(0, "reserve new metadata %d, credits = %d\n",
  3097. ref_blocks, credits);
  3098. if (ref_blocks) {
  3099. ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
  3100. ref_blocks, &meta_ac);
  3101. if (ret) {
  3102. mlog_errno(ret);
  3103. goto out;
  3104. }
  3105. }
  3106. if (post)
  3107. credits += post->credits;
  3108. handle = ocfs2_start_trans(osb, credits);
  3109. if (IS_ERR(handle)) {
  3110. ret = PTR_ERR(handle);
  3111. mlog_errno(ret);
  3112. goto out;
  3113. }
  3114. ret = ocfs2_mark_extent_refcounted(inode, data_et, handle,
  3115. cpos, num_clusters, p_cluster,
  3116. meta_ac, dealloc);
  3117. if (ret) {
  3118. mlog_errno(ret);
  3119. goto out_commit;
  3120. }
  3121. ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
  3122. p_cluster, num_clusters, 0,
  3123. meta_ac, dealloc);
  3124. if (ret) {
  3125. mlog_errno(ret);
  3126. goto out_commit;
  3127. }
  3128. if (post && post->func) {
  3129. ret = post->func(inode, handle, post->para);
  3130. if (ret)
  3131. mlog_errno(ret);
  3132. }
  3133. out_commit:
  3134. ocfs2_commit_trans(osb, handle);
  3135. out:
  3136. if (meta_ac)
  3137. ocfs2_free_alloc_context(meta_ac);
  3138. return ret;
  3139. }
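/*
 * Bump the inode's ctime and write it back to the dinode in its own
 * small transaction.
 */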
  3140. static int ocfs2_change_ctime(struct inode *inode,
  3141. struct buffer_head *di_bh)
  3142. {
  3143. int ret;
  3144. handle_t *handle;
  3145. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  3146. handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
  3147. OCFS2_INODE_UPDATE_CREDITS);
  3148. if (IS_ERR(handle)) {
  3149. ret = PTR_ERR(handle);
  3150. mlog_errno(ret);
  3151. goto out;
  3152. }
  3153. ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
  3154. OCFS2_JOURNAL_ACCESS_WRITE);
  3155. if (ret) {
  3156. mlog_errno(ret);
  3157. goto out_commit;
  3158. }
  3159. inode->i_ctime = CURRENT_TIME;
  3160. di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
  3161. di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
  3162. ocfs2_journal_dirty(handle, di_bh);
  3163. out_commit:
  3164. ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
  3165. out:
  3166. return ret;
  3167. }
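/*
 * Attach a refcount tree to the inode (creating one first if it does not
 * exist yet) and mark every allocated data extent, plus any xattr
 * extents, as refcounted.  ctime is updated if any data extent changed.
 */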
  3168. static int ocfs2_attach_refcount_tree(struct inode *inode,
  3169. struct buffer_head *di_bh)
  3170. {
  3171. int ret, data_changed = 0;
  3172. struct buffer_head *ref_root_bh = NULL;
  3173. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  3174. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  3175. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  3176. struct ocfs2_refcount_tree *ref_tree;
  3177. unsigned int ext_flags;
  3178. loff_t size;
  3179. u32 cpos, num_clusters, clusters, p_cluster;
  3180. struct ocfs2_cached_dealloc_ctxt dealloc;
  3181. struct ocfs2_extent_tree di_et;
  3182. ocfs2_init_dealloc_ctxt(&dealloc);
  3183. if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) {
  3184. ret = ocfs2_create_refcount_tree(inode, di_bh);
  3185. if (ret) {
  3186. mlog_errno(ret);
  3187. goto out;
  3188. }
  3189. }
  3190. BUG_ON(!di->i_refcount_loc);
  3191. ret = ocfs2_lock_refcount_tree(osb,
  3192. le64_to_cpu(di->i_refcount_loc), 1,
  3193. &ref_tree, &ref_root_bh);
  3194. if (ret) {
  3195. mlog_errno(ret);
  3196. goto out;
  3197. }
  3198. ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
  3199. size = i_size_read(inode);
  3200. clusters = ocfs2_clusters_for_bytes(inode->i_sb, size);
  3201. cpos = 0;
  3202. while (cpos < clusters) {
3203. ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
3204. &num_clusters, &ext_flags);
if (ret) {
mlog_errno(ret);
goto unlock;
}
3205. if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) {
  3206. ret = ocfs2_add_refcount_flag(inode, &di_et,
  3207. &ref_tree->rf_ci,
  3208. ref_root_bh, cpos,
  3209. p_cluster, num_clusters,
  3210. &dealloc, NULL);
  3211. if (ret) {
  3212. mlog_errno(ret);
  3213. goto unlock;
  3214. }
  3215. data_changed = 1;
  3216. }
  3217. cpos += num_clusters;
  3218. }
  3219. if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
  3220. ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
  3221. &ref_tree->rf_ci,
  3222. ref_root_bh,
  3223. &dealloc);
  3224. if (ret) {
  3225. mlog_errno(ret);
  3226. goto unlock;
  3227. }
  3228. }
  3229. if (data_changed) {
  3230. ret = ocfs2_change_ctime(inode, di_bh);
  3231. if (ret)
  3232. mlog_errno(ret);
  3233. }
  3234. unlock:
  3235. ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
  3236. brelse(ref_root_bh);
  3237. if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) {
  3238. ocfs2_schedule_truncate_log_flush(osb, 1);
  3239. ocfs2_run_deallocs(osb, &dealloc);
  3240. }
  3241. out:
  3242. /*
  3243. * Empty the extent map so that we may get the right extent
  3244. * record from the disk.
  3245. */
  3246. ocfs2_extent_map_trunc(inode, 0);
  3247. return ret;
  3248. }
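/*
 * Insert an already-refcounted extent into the target extent tree and
 * bump the refcount of the underlying clusters, all in one transaction.
 */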
  3249. static int ocfs2_add_refcounted_extent(struct inode *inode,
  3250. struct ocfs2_extent_tree *et,
  3251. struct ocfs2_caching_info *ref_ci,
  3252. struct buffer_head *ref_root_bh,
  3253. u32 cpos, u32 p_cluster, u32 num_clusters,
  3254. unsigned int ext_flags,
  3255. struct ocfs2_cached_dealloc_ctxt *dealloc)
  3256. {
  3257. int ret;
  3258. handle_t *handle;
  3259. int credits = 0;
  3260. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  3261. struct ocfs2_alloc_context *meta_ac = NULL;
  3262. ret = ocfs2_lock_refcount_allocators(inode->i_sb,
  3263. p_cluster, num_clusters,
  3264. et, ref_ci,
  3265. ref_root_bh, &meta_ac,
  3266. NULL, &credits);
  3267. if (ret) {
  3268. mlog_errno(ret);
  3269. goto out;
  3270. }
  3271. handle = ocfs2_start_trans(osb, credits);
  3272. if (IS_ERR(handle)) {
  3273. ret = PTR_ERR(handle);
  3274. mlog_errno(ret);
  3275. goto out;
  3276. }
  3277. ret = ocfs2_insert_extent(handle, et, cpos,
3278. ocfs2_clusters_to_blocks(inode->i_sb,
3279. p_cluster),
3280. num_clusters, ext_flags, meta_ac);
  3281. if (ret) {
  3282. mlog_errno(ret);
  3283. goto out_commit;
  3284. }
  3285. ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
  3286. p_cluster, num_clusters,
  3287. meta_ac, dealloc);
  3288. if (ret)
  3289. mlog_errno(ret);
  3290. out_commit:
  3291. ocfs2_commit_trans(osb, handle);
  3292. out:
  3293. if (meta_ac)
  3294. ocfs2_free_alloc_context(meta_ac);
  3295. return ret;
  3296. }
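/*
 * Copy the source inode's extent list into the target inode, adding each
 * allocated extent as a refcounted extent via
 * ocfs2_add_refcounted_extent().
 */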
  3297. static int ocfs2_duplicate_extent_list(struct inode *s_inode,
  3298. struct inode *t_inode,
  3299. struct buffer_head *t_bh,
  3300. struct ocfs2_caching_info *ref_ci,
  3301. struct buffer_head *ref_root_bh,
  3302. struct ocfs2_cached_dealloc_ctxt *dealloc)
  3303. {
  3304. int ret = 0;
  3305. u32 p_cluster, num_clusters, clusters, cpos;
  3306. loff_t size;
  3307. unsigned int ext_flags;
  3308. struct ocfs2_extent_tree et;
  3309. ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh);
  3310. size = i_size_read(s_inode);
  3311. clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size);
  3312. cpos = 0;
  3313. while (cpos < clusters) {
3314. ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
3315. &num_clusters, &ext_flags);
if (ret) {
mlog_errno(ret);
goto out;
}
3316. if (p_cluster) {
  3317. ret = ocfs2_add_refcounted_extent(t_inode, &et,
  3318. ref_ci, ref_root_bh,
  3319. cpos, p_cluster,
  3320. num_clusters,
  3321. ext_flags,
  3322. dealloc);
  3323. if (ret) {
  3324. mlog_errno(ret);
  3325. goto out;
  3326. }
  3327. }
  3328. cpos += num_clusters;
  3329. }
  3330. out:
  3331. return ret;
  3332. }
  3333. /*
  3334. * change the new file's attributes to the src.
  3335. *
  3336. * reflink creates a snapshot of a file, that means the attributes
  3337. * must be identical except for three exceptions - nlink, ino, and ctime.
  3338. */
  3339. static int ocfs2_complete_reflink(struct inode *s_inode,
  3340. struct buffer_head *s_bh,
  3341. struct inode *t_inode,
  3342. struct buffer_head *t_bh)
  3343. {
  3344. int ret;
  3345. handle_t *handle;
  3346. struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
  3347. struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data;
  3348. loff_t size = i_size_read(s_inode);
  3349. handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb),
  3350. OCFS2_INODE_UPDATE_CREDITS);
  3351. if (IS_ERR(handle)) {
  3352. ret = PTR_ERR(handle);
  3353. mlog_errno(ret);
  3354. return ret;
  3355. }
  3356. ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
  3357. OCFS2_JOURNAL_ACCESS_WRITE);
  3358. if (ret) {
  3359. mlog_errno(ret);
  3360. goto out_commit;
  3361. }
  3362. spin_lock(&OCFS2_I(t_inode)->ip_lock);
  3363. OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters;
  3364. OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr;
  3365. OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features;
  3366. spin_unlock(&OCFS2_I(t_inode)->ip_lock);
  3367. i_size_write(t_inode, size);
  3368. di->i_xattr_inline_size = s_di->i_xattr_inline_size;
  3369. di->i_clusters = s_di->i_clusters;
  3370. di->i_size = s_di->i_size;
  3371. di->i_dyn_features = s_di->i_dyn_features;
  3372. di->i_attr = s_di->i_attr;
  3373. di->i_uid = s_di->i_uid;
  3374. di->i_gid = s_di->i_gid;
  3375. di->i_mode = s_di->i_mode;
  3376. /*
3377. * Update the timestamps.
3378. * We want mtime to appear identical to the source, while ctime is updated.
  3379. */
  3380. t_inode->i_ctime = CURRENT_TIME;
  3381. di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec);
  3382. di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec);
  3383. t_inode->i_mtime = s_inode->i_mtime;
  3384. di->i_mtime = s_di->i_mtime;
  3385. di->i_mtime_nsec = s_di->i_mtime_nsec;
  3386. ocfs2_journal_dirty(handle, t_bh);
  3387. out_commit:
  3388. ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle);
  3389. return ret;
  3390. }
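/*
 * Create the reflink target: point the new inode at the source's
 * refcount tree, duplicate the source's extent list into it, and copy
 * the inode attributes over.
 */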
  3391. static int ocfs2_create_reflink_node(struct inode *s_inode,
  3392. struct buffer_head *s_bh,
  3393. struct inode *t_inode,
  3394. struct buffer_head *t_bh)
  3395. {
  3396. int ret;
  3397. struct buffer_head *ref_root_bh = NULL;
  3398. struct ocfs2_cached_dealloc_ctxt dealloc;
  3399. struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
  3400. struct ocfs2_refcount_block *rb;
  3401. struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
  3402. struct ocfs2_refcount_tree *ref_tree;
  3403. ocfs2_init_dealloc_ctxt(&dealloc);
  3404. ret = ocfs2_set_refcount_tree(t_inode, t_bh,
  3405. le64_to_cpu(di->i_refcount_loc));
  3406. if (ret) {
  3407. mlog_errno(ret);
  3408. goto out;
  3409. }
  3410. ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
  3411. 1, &ref_tree, &ref_root_bh);
  3412. if (ret) {
  3413. mlog_errno(ret);
  3414. goto out;
  3415. }
  3416. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  3417. ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
  3418. &ref_tree->rf_ci, ref_root_bh,
  3419. &dealloc);
  3420. if (ret) {
  3421. mlog_errno(ret);
  3422. goto out_unlock_refcount;
  3423. }
  3424. ret = ocfs2_complete_reflink(s_inode, s_bh, t_inode, t_bh);
  3425. if (ret)
  3426. mlog_errno(ret);
  3427. out_unlock_refcount:
  3428. ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
  3429. brelse(ref_root_bh);
  3430. out:
  3431. if (ocfs2_dealloc_has_cluster(&dealloc)) {
  3432. ocfs2_schedule_truncate_log_flush(osb, 1);
  3433. ocfs2_run_deallocs(osb, &dealloc);
  3434. }
  3435. return ret;
  3436. }