/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * refcounttree.c
 *
 * Copyright (C) 2009 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/sort.h>
#define MLOG_MASK_PREFIX ML_REFCOUNT
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "inode.h"
#include "alloc.h"
#include "suballoc.h"
#include "journal.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "blockcheck.h"
#include "refcounttree.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "aops.h"

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

struct ocfs2_cow_context {
	struct inode *inode;
	u32 cow_start;
	u32 cow_len;
	struct ocfs2_extent_tree di_et;
	struct ocfs2_caching_info *ref_ci;
	struct buffer_head *ref_root_bh;
	struct ocfs2_alloc_context *meta_ac;
	struct ocfs2_alloc_context *data_ac;
	struct ocfs2_cached_dealloc_ctxt dealloc;
};
  52. static inline struct ocfs2_refcount_tree *
  53. cache_info_to_refcount(struct ocfs2_caching_info *ci)
  54. {
  55. return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
  56. }
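/*
 * Sanity-check a refcount block that was just read from disk: verify the
 * metaecc checksum, the block signature, rf_blkno and rf_fs_generation
 * before anyone trusts its contents.
 */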
  57. static int ocfs2_validate_refcount_block(struct super_block *sb,
  58. struct buffer_head *bh)
  59. {
  60. int rc;
  61. struct ocfs2_refcount_block *rb =
  62. (struct ocfs2_refcount_block *)bh->b_data;
  63. mlog(0, "Validating refcount block %llu\n",
  64. (unsigned long long)bh->b_blocknr);
  65. BUG_ON(!buffer_uptodate(bh));
  66. /*
  67. * If the ecc fails, we return the error but otherwise
  68. * leave the filesystem running. We know any error is
  69. * local to this block.
  70. */
  71. rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
  72. if (rc) {
  73. mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
  74. (unsigned long long)bh->b_blocknr);
  75. return rc;
  76. }
  77. if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
  78. ocfs2_error(sb,
  79. "Refcount block #%llu has bad signature %.*s",
  80. (unsigned long long)bh->b_blocknr, 7,
  81. rb->rf_signature);
  82. return -EINVAL;
  83. }
  84. if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
  85. ocfs2_error(sb,
  86. "Refcount block #%llu has an invalid rf_blkno "
  87. "of %llu",
  88. (unsigned long long)bh->b_blocknr,
  89. (unsigned long long)le64_to_cpu(rb->rf_blkno));
  90. return -EINVAL;
  91. }
  92. if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
  93. ocfs2_error(sb,
  94. "Refcount block #%llu has an invalid "
  95. "rf_fs_generation of #%u",
  96. (unsigned long long)bh->b_blocknr,
  97. le32_to_cpu(rb->rf_fs_generation));
  98. return -EINVAL;
  99. }
  100. return 0;
  101. }
  102. static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
  103. u64 rb_blkno,
  104. struct buffer_head **bh)
  105. {
  106. int rc;
  107. struct buffer_head *tmp = *bh;
  108. rc = ocfs2_read_block(ci, rb_blkno, &tmp,
  109. ocfs2_validate_refcount_block);
  110. /* If ocfs2_read_block() got us a new bh, pass it up. */
  111. if (!rc && !*bh)
  112. *bh = tmp;
  113. return rc;
  114. }
  115. static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
  116. {
  117. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  118. return rf->rf_blkno;
  119. }
  120. static struct super_block *
  121. ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
  122. {
  123. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  124. return rf->rf_sb;
  125. }
  126. static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
  127. {
  128. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  129. spin_lock(&rf->rf_lock);
  130. }
  131. static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
  132. {
  133. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  134. spin_unlock(&rf->rf_lock);
  135. }
  136. static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
  137. {
  138. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  139. mutex_lock(&rf->rf_io_mutex);
  140. }
  141. static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
  142. {
  143. struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
  144. mutex_unlock(&rf->rf_io_mutex);
  145. }
  146. static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
  147. .co_owner = ocfs2_refcount_cache_owner,
  148. .co_get_super = ocfs2_refcount_cache_get_super,
  149. .co_cache_lock = ocfs2_refcount_cache_lock,
  150. .co_cache_unlock = ocfs2_refcount_cache_unlock,
  151. .co_io_lock = ocfs2_refcount_cache_io_lock,
  152. .co_io_unlock = ocfs2_refcount_cache_io_unlock,
  153. };
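/*
 * Look up a cached refcount tree by the block number of its root in the
 * per-superblock rb-tree. The caller must hold osb->osb_lock.
 */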
  154. static struct ocfs2_refcount_tree *
  155. ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
  156. {
  157. struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
  158. struct ocfs2_refcount_tree *tree = NULL;
  159. while (n) {
  160. tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);
  161. if (blkno < tree->rf_blkno)
  162. n = n->rb_left;
  163. else if (blkno > tree->rf_blkno)
  164. n = n->rb_right;
  165. else
  166. return tree;
  167. }
  168. return NULL;
  169. }
  170. /* osb_lock is already locked. */
  171. static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
  172. struct ocfs2_refcount_tree *new)
  173. {
  174. u64 rf_blkno = new->rf_blkno;
  175. struct rb_node *parent = NULL;
  176. struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
  177. struct ocfs2_refcount_tree *tmp;
  178. while (*p) {
  179. parent = *p;
  180. tmp = rb_entry(parent, struct ocfs2_refcount_tree,
  181. rf_node);
  182. if (rf_blkno < tmp->rf_blkno)
  183. p = &(*p)->rb_left;
  184. else if (rf_blkno > tmp->rf_blkno)
  185. p = &(*p)->rb_right;
  186. else {
  187. /* This should never happen! */
  188. mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
  189. (unsigned long long)rf_blkno);
  190. BUG();
  191. }
  192. }
  193. rb_link_node(&new->rf_node, parent, p);
  194. rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
  195. }
  196. static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
  197. {
  198. ocfs2_metadata_cache_exit(&tree->rf_ci);
  199. ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
  200. ocfs2_lock_res_free(&tree->rf_lockres);
  201. kfree(tree);
  202. }
  203. static inline void
  204. ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
  205. struct ocfs2_refcount_tree *tree)
  206. {
  207. rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
  208. if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
  209. osb->osb_ref_tree_lru = NULL;
  210. }
  211. static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
  212. struct ocfs2_refcount_tree *tree)
  213. {
  214. spin_lock(&osb->osb_lock);
  215. ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
  216. spin_unlock(&osb->osb_lock);
  217. }
  218. void ocfs2_kref_remove_refcount_tree(struct kref *kref)
  219. {
  220. struct ocfs2_refcount_tree *tree =
  221. container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);
  222. ocfs2_free_refcount_tree(tree);
  223. }
  224. static inline void
  225. ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
  226. {
  227. kref_get(&tree->rf_getcnt);
  228. }
  229. static inline void
  230. ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
  231. {
  232. kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
  233. }
  234. static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
  235. struct super_block *sb)
  236. {
  237. ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
  238. mutex_init(&new->rf_io_mutex);
  239. new->rf_sb = sb;
  240. spin_lock_init(&new->rf_lock);
  241. }
  242. static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
  243. struct ocfs2_refcount_tree *new,
  244. u64 rf_blkno, u32 generation)
  245. {
  246. init_rwsem(&new->rf_sem);
  247. ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
  248. rf_blkno, generation);
  249. }
  250. static struct ocfs2_refcount_tree*
  251. ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
  252. {
  253. struct ocfs2_refcount_tree *new;
  254. new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
  255. if (!new)
  256. return NULL;
  257. new->rf_blkno = rf_blkno;
  258. kref_init(&new->rf_getcnt);
  259. ocfs2_init_refcount_tree_ci(new, osb->sb);
  260. return new;
  261. }
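/*
 * Find the in-memory ocfs2_refcount_tree for rf_blkno, creating and
 * inserting a new one if it isn't cached yet. A one-entry LRU
 * (osb_ref_tree_lru) short-circuits repeated lookups, and osb_lock is
 * dropped while allocating, so a racing insert is handled by re-checking
 * the rb-tree afterwards.
 */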
  262. static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
  263. struct ocfs2_refcount_tree **ret_tree)
  264. {
  265. int ret = 0;
  266. struct ocfs2_refcount_tree *tree, *new = NULL;
  267. struct buffer_head *ref_root_bh = NULL;
  268. struct ocfs2_refcount_block *ref_rb;
  269. spin_lock(&osb->osb_lock);
  270. if (osb->osb_ref_tree_lru &&
  271. osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
  272. tree = osb->osb_ref_tree_lru;
  273. else
  274. tree = ocfs2_find_refcount_tree(osb, rf_blkno);
  275. if (tree)
  276. goto out;
  277. spin_unlock(&osb->osb_lock);
  278. new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
  279. if (!new) {
  280. ret = -ENOMEM;
  281. mlog_errno(ret);
  282. return ret;
  283. }
	/*
	 * We need the generation to create the refcount tree lock, and since
	 * it isn't changed during tree modification, we are safe to read it
	 * here without protection.
	 * We also have to purge the cache after we create the lock, since the
	 * refcount block may contain stale data. It can only be trusted once
	 * we hold the refcount lock.
	 */
  292. ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
  293. if (ret) {
  294. mlog_errno(ret);
  295. ocfs2_metadata_cache_exit(&new->rf_ci);
  296. kfree(new);
  297. return ret;
  298. }
  299. ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  300. new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
  301. ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
  302. new->rf_generation);
  303. ocfs2_metadata_cache_purge(&new->rf_ci);
  304. spin_lock(&osb->osb_lock);
  305. tree = ocfs2_find_refcount_tree(osb, rf_blkno);
  306. if (tree)
  307. goto out;
  308. ocfs2_insert_refcount_tree(osb, new);
  309. tree = new;
  310. new = NULL;
  311. out:
  312. *ret_tree = tree;
  313. osb->osb_ref_tree_lru = tree;
  314. spin_unlock(&osb->osb_lock);
  315. if (new)
  316. ocfs2_free_refcount_tree(new);
  317. brelse(ref_root_bh);
  318. return ret;
  319. }
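/* Read the inode block and return its i_refcount_loc (the tree root). */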
  320. static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
  321. {
  322. int ret;
  323. struct buffer_head *di_bh = NULL;
  324. struct ocfs2_dinode *di;
  325. ret = ocfs2_read_inode_block(inode, &di_bh);
  326. if (ret) {
  327. mlog_errno(ret);
  328. goto out;
  329. }
  330. BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  331. di = (struct ocfs2_dinode *)di_bh->b_data;
  332. *ref_blkno = le64_to_cpu(di->i_refcount_loc);
  333. brelse(di_bh);
  334. out:
  335. return ret;
  336. }
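/*
 * Take the cluster lock on the refcount tree and then the local
 * rw_semaphore, in write mode when rw is non-zero.
 */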
  337. static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
  338. struct ocfs2_refcount_tree *tree, int rw)
  339. {
  340. int ret;
  341. ret = ocfs2_refcount_lock(tree, rw);
  342. if (ret) {
  343. mlog_errno(ret);
  344. goto out;
  345. }
  346. if (rw)
  347. down_write(&tree->rf_sem);
  348. else
  349. down_read(&tree->rf_sem);
  350. out:
  351. return ret;
  352. }
/*
 * Lock the refcount tree pointed to by ref_blkno and return the tree.
 * In most cases we lock the tree and then read the refcount block, so
 * read it here if the caller really needs it.
 *
 * If the tree has been re-created by another node, free the old one
 * and re-create it.
 */
  361. int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
  362. u64 ref_blkno, int rw,
  363. struct ocfs2_refcount_tree **ret_tree,
  364. struct buffer_head **ref_bh)
  365. {
  366. int ret, delete_tree = 0;
  367. struct ocfs2_refcount_tree *tree = NULL;
  368. struct buffer_head *ref_root_bh = NULL;
  369. struct ocfs2_refcount_block *rb;
  370. again:
  371. ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
  372. if (ret) {
  373. mlog_errno(ret);
  374. return ret;
  375. }
  376. ocfs2_refcount_tree_get(tree);
  377. ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
  378. if (ret) {
  379. mlog_errno(ret);
  380. ocfs2_refcount_tree_put(tree);
  381. goto out;
  382. }
  383. ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
  384. &ref_root_bh);
  385. if (ret) {
  386. mlog_errno(ret);
  387. ocfs2_unlock_refcount_tree(osb, tree, rw);
  388. ocfs2_refcount_tree_put(tree);
  389. goto out;
  390. }
  391. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  392. /*
  393. * If the refcount block has been freed and re-created, we may need
  394. * to recreate the refcount tree also.
  395. *
  396. * Here we just remove the tree from the rb-tree, and the last
  397. * kref holder will unlock and delete this refcount_tree.
  398. * Then we goto "again" and ocfs2_get_refcount_tree will create
  399. * the new refcount tree for us.
  400. */
  401. if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
  402. if (!tree->rf_removed) {
  403. ocfs2_erase_refcount_tree_from_list(osb, tree);
  404. tree->rf_removed = 1;
  405. delete_tree = 1;
  406. }
  407. ocfs2_unlock_refcount_tree(osb, tree, rw);
  408. /*
  409. * We get an extra reference when we create the refcount
  410. * tree, so another put will destroy it.
  411. */
  412. if (delete_tree)
  413. ocfs2_refcount_tree_put(tree);
  414. brelse(ref_root_bh);
  415. ref_root_bh = NULL;
  416. goto again;
  417. }
  418. *ret_tree = tree;
  419. if (ref_bh) {
  420. *ref_bh = ref_root_bh;
  421. ref_root_bh = NULL;
  422. }
  423. out:
  424. brelse(ref_root_bh);
  425. return ret;
  426. }
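/* Convenience wrapper: look up the inode's refcount root and lock that tree. */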
  427. int ocfs2_lock_refcount_tree_by_inode(struct inode *inode, int rw,
  428. struct ocfs2_refcount_tree **ret_tree,
  429. struct buffer_head **ref_bh)
  430. {
  431. int ret;
  432. u64 ref_blkno;
  433. ret = ocfs2_get_refcount_block(inode, &ref_blkno);
  434. if (ret) {
  435. mlog_errno(ret);
  436. return ret;
  437. }
  438. return ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno,
  439. rw, ret_tree, ref_bh);
  440. }
  441. void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
  442. struct ocfs2_refcount_tree *tree, int rw)
  443. {
  444. if (rw)
  445. up_write(&tree->rf_sem);
  446. else
  447. up_read(&tree->rf_sem);
  448. ocfs2_refcount_unlock(tree, rw);
  449. ocfs2_refcount_tree_put(tree);
  450. }
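/* Tear down every refcount tree still cached on this superblock. */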
  451. void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
  452. {
  453. struct rb_node *node;
  454. struct ocfs2_refcount_tree *tree;
  455. struct rb_root *root = &osb->osb_rf_lock_tree;
  456. while ((node = rb_last(root)) != NULL) {
  457. tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);
  458. mlog(0, "Purge tree %llu\n",
  459. (unsigned long long) tree->rf_blkno);
  460. rb_erase(&tree->rf_node, root);
  461. ocfs2_free_refcount_tree(tree);
  462. }
  463. }
  464. /*
  465. * Create a refcount tree for an inode.
  466. * We take for granted that the inode is already locked.
  467. */
  468. static int ocfs2_create_refcount_tree(struct inode *inode,
  469. struct buffer_head *di_bh)
  470. {
  471. int ret;
  472. handle_t *handle = NULL;
  473. struct ocfs2_alloc_context *meta_ac = NULL;
  474. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  475. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  476. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  477. struct buffer_head *new_bh = NULL;
  478. struct ocfs2_refcount_block *rb;
  479. struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
  480. u16 suballoc_bit_start;
  481. u32 num_got;
  482. u64 first_blkno;
  483. BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
  484. mlog(0, "create tree for inode %lu\n", inode->i_ino);
  485. ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
  486. if (ret) {
  487. mlog_errno(ret);
  488. goto out;
  489. }
  490. handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
  491. if (IS_ERR(handle)) {
  492. ret = PTR_ERR(handle);
  493. mlog_errno(ret);
  494. goto out;
  495. }
  496. ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
  497. OCFS2_JOURNAL_ACCESS_WRITE);
  498. if (ret) {
  499. mlog_errno(ret);
  500. goto out_commit;
  501. }
  502. ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1,
  503. &suballoc_bit_start, &num_got,
  504. &first_blkno);
  505. if (ret) {
  506. mlog_errno(ret);
  507. goto out_commit;
  508. }
  509. new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
  510. if (!new_tree) {
  511. ret = -ENOMEM;
  512. mlog_errno(ret);
  513. goto out_commit;
  514. }
	new_bh = sb_getblk(inode->i_sb, first_blkno);
	if (new_bh == NULL) {
		/* sb_getblk() can fail; bail out like the other callers do. */
		ret = -EIO;
		mlog_errno(ret);
		goto out_commit;
	}
	ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);

	ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
  519. if (ret) {
  520. mlog_errno(ret);
  521. goto out_commit;
  522. }
  523. /* Initialize ocfs2_refcount_block. */
  524. rb = (struct ocfs2_refcount_block *)new_bh->b_data;
  525. memset(rb, 0, inode->i_sb->s_blocksize);
  526. strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
  527. rb->rf_suballoc_slot = cpu_to_le16(osb->slot_num);
  528. rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
  529. rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
  530. rb->rf_blkno = cpu_to_le64(first_blkno);
  531. rb->rf_count = cpu_to_le32(1);
  532. rb->rf_records.rl_count =
  533. cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
  534. spin_lock(&osb->osb_lock);
  535. rb->rf_generation = osb->s_next_generation++;
  536. spin_unlock(&osb->osb_lock);
  537. ocfs2_journal_dirty(handle, new_bh);
  538. spin_lock(&oi->ip_lock);
  539. oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
  540. di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
  541. di->i_refcount_loc = cpu_to_le64(first_blkno);
  542. spin_unlock(&oi->ip_lock);
  543. mlog(0, "created tree for inode %lu, refblock %llu\n",
  544. inode->i_ino, (unsigned long long)first_blkno);
  545. ocfs2_journal_dirty(handle, di_bh);
  546. /*
  547. * We have to init the tree lock here since it will use
  548. * the generation number to create it.
  549. */
  550. new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
  551. ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
  552. new_tree->rf_generation);
  553. spin_lock(&osb->osb_lock);
  554. tree = ocfs2_find_refcount_tree(osb, first_blkno);
  555. /*
  556. * We've just created a new refcount tree in this block. If
  557. * we found a refcount tree on the ocfs2_super, it must be
  558. * one we just deleted. We free the old tree before
  559. * inserting the new tree.
  560. */
  561. BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
  562. if (tree)
  563. ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
  564. ocfs2_insert_refcount_tree(osb, new_tree);
  565. spin_unlock(&osb->osb_lock);
  566. new_tree = NULL;
  567. if (tree)
  568. ocfs2_refcount_tree_put(tree);
  569. out_commit:
  570. ocfs2_commit_trans(osb, handle);
  571. out:
  572. if (new_tree) {
  573. ocfs2_metadata_cache_exit(&new_tree->rf_ci);
  574. kfree(new_tree);
  575. }
  576. brelse(new_bh);
  577. if (meta_ac)
  578. ocfs2_free_alloc_context(meta_ac);
  579. return ret;
  580. }
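/*
 * Attach an inode to an already existing refcount tree at refcount_loc:
 * bump rf_count on the root block and record the location and the
 * OCFS2_HAS_REFCOUNT_FL flag in the inode.
 */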
  581. static int ocfs2_set_refcount_tree(struct inode *inode,
  582. struct buffer_head *di_bh,
  583. u64 refcount_loc)
  584. {
  585. int ret;
  586. handle_t *handle = NULL;
  587. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  588. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  589. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  590. struct buffer_head *ref_root_bh = NULL;
  591. struct ocfs2_refcount_block *rb;
  592. struct ocfs2_refcount_tree *ref_tree;
  593. BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
  594. ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
  595. &ref_tree, &ref_root_bh);
  596. if (ret) {
  597. mlog_errno(ret);
  598. return ret;
  599. }
  600. handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
  601. if (IS_ERR(handle)) {
  602. ret = PTR_ERR(handle);
  603. mlog_errno(ret);
  604. goto out;
  605. }
  606. ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
  607. OCFS2_JOURNAL_ACCESS_WRITE);
  608. if (ret) {
  609. mlog_errno(ret);
  610. goto out_commit;
  611. }
  612. ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
  613. OCFS2_JOURNAL_ACCESS_WRITE);
  614. if (ret) {
  615. mlog_errno(ret);
  616. goto out_commit;
  617. }
  618. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  619. le32_add_cpu(&rb->rf_count, 1);
  620. ocfs2_journal_dirty(handle, ref_root_bh);
  621. spin_lock(&oi->ip_lock);
  622. oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
  623. di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
  624. di->i_refcount_loc = cpu_to_le64(refcount_loc);
  625. spin_unlock(&oi->ip_lock);
  626. ocfs2_journal_dirty(handle, di_bh);
  627. out_commit:
  628. ocfs2_commit_trans(osb, handle);
  629. out:
  630. ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
  631. brelse(ref_root_bh);
  632. return ret;
  633. }
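/*
 * Detach an inode from its refcount tree: clear OCFS2_HAS_REFCOUNT_FL and
 * i_refcount_loc, drop rf_count on the root block, and give the block back
 * to the extent allocator if we were the last user.
 */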
  634. int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
  635. {
  636. int ret, delete_tree = 0;
  637. handle_t *handle = NULL;
  638. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  639. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  640. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  641. struct ocfs2_refcount_block *rb;
  642. struct inode *alloc_inode = NULL;
  643. struct buffer_head *alloc_bh = NULL;
  644. struct buffer_head *blk_bh = NULL;
  645. struct ocfs2_refcount_tree *ref_tree;
  646. int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
  647. u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
  648. u16 bit = 0;
  649. if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL))
  650. return 0;
  651. BUG_ON(!ref_blkno);
  652. ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
  653. if (ret) {
  654. mlog_errno(ret);
  655. return ret;
  656. }
  657. rb = (struct ocfs2_refcount_block *)blk_bh->b_data;
	/*
	 * If we are the last user, we need to free the block,
	 * so take the allocator lock ahead of time.
	 */
  662. if (le32_to_cpu(rb->rf_count) == 1) {
  663. blk = le64_to_cpu(rb->rf_blkno);
  664. bit = le16_to_cpu(rb->rf_suballoc_bit);
  665. bg_blkno = ocfs2_which_suballoc_group(blk, bit);
  666. alloc_inode = ocfs2_get_system_file_inode(osb,
  667. EXTENT_ALLOC_SYSTEM_INODE,
  668. le16_to_cpu(rb->rf_suballoc_slot));
  669. if (!alloc_inode) {
  670. ret = -ENOMEM;
  671. mlog_errno(ret);
  672. goto out;
  673. }
  674. mutex_lock(&alloc_inode->i_mutex);
  675. ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
  676. if (ret) {
  677. mlog_errno(ret);
  678. goto out_mutex;
  679. }
  680. credits += OCFS2_SUBALLOC_FREE;
  681. }
  682. handle = ocfs2_start_trans(osb, credits);
  683. if (IS_ERR(handle)) {
  684. ret = PTR_ERR(handle);
  685. mlog_errno(ret);
  686. goto out_unlock;
  687. }
  688. ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
  689. OCFS2_JOURNAL_ACCESS_WRITE);
  690. if (ret) {
  691. mlog_errno(ret);
  692. goto out_commit;
  693. }
  694. ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
  695. OCFS2_JOURNAL_ACCESS_WRITE);
  696. if (ret) {
  697. mlog_errno(ret);
  698. goto out_commit;
  699. }
  700. spin_lock(&oi->ip_lock);
  701. oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
  702. di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
  703. di->i_refcount_loc = 0;
  704. spin_unlock(&oi->ip_lock);
  705. ocfs2_journal_dirty(handle, di_bh);
	le32_add_cpu(&rb->rf_count, -1);
	ocfs2_journal_dirty(handle, blk_bh);
	if (!rb->rf_count) {
  709. delete_tree = 1;
  710. ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
  711. ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
  712. alloc_bh, bit, bg_blkno, 1);
  713. if (ret)
  714. mlog_errno(ret);
  715. }
  716. out_commit:
  717. ocfs2_commit_trans(osb, handle);
  718. out_unlock:
  719. if (alloc_inode) {
  720. ocfs2_inode_unlock(alloc_inode, 1);
  721. brelse(alloc_bh);
  722. }
  723. out_mutex:
  724. if (alloc_inode) {
  725. mutex_unlock(&alloc_inode->i_mutex);
  726. iput(alloc_inode);
  727. }
  728. out:
  729. ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
  730. if (delete_tree)
  731. ocfs2_refcount_tree_put(ref_tree);
  732. brelse(blk_bh);
  733. return ret;
  734. }
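/*
 * Scan the record list of a single refcount block for the record that
 * contains cpos. If cpos falls in a hole, fake a record with
 * r_refcount = 0 covering the hole (clipped to len or to the next
 * record). *index returns the slot that was found.
 */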
  735. static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
  736. struct buffer_head *ref_leaf_bh,
  737. u64 cpos, unsigned int len,
  738. struct ocfs2_refcount_rec *ret_rec,
  739. int *index)
  740. {
  741. int i = 0;
  742. struct ocfs2_refcount_block *rb =
  743. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  744. struct ocfs2_refcount_rec *rec = NULL;
  745. for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
  746. rec = &rb->rf_records.rl_recs[i];
  747. if (le64_to_cpu(rec->r_cpos) +
  748. le32_to_cpu(rec->r_clusters) <= cpos)
  749. continue;
  750. else if (le64_to_cpu(rec->r_cpos) > cpos)
  751. break;
		/* ok, cpos falls in this rec. Just return. */
  753. if (ret_rec)
  754. *ret_rec = *rec;
  755. goto out;
  756. }
  757. if (ret_rec) {
  758. /* We meet with a hole here, so fake the rec. */
  759. ret_rec->r_cpos = cpu_to_le64(cpos);
  760. ret_rec->r_refcount = 0;
  761. if (i < le16_to_cpu(rb->rf_records.rl_used) &&
  762. le64_to_cpu(rec->r_cpos) < cpos + len)
  763. ret_rec->r_clusters =
  764. cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
  765. else
  766. ret_rec->r_clusters = cpu_to_le32(len);
  767. }
  768. out:
  769. *index = i;
  770. }
/*
 * Given a cpos and len, try to find the refcount record which contains cpos.
 * 1. If cpos can be found in one refcount record, return the record.
 * 2. If cpos can't be found, return a fake record which starts at cpos
 *    and ends somewhere between cpos + len and the start of the next
 *    record. This fake record has r_refcount = 0.
 */
  778. static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
  779. struct buffer_head *ref_root_bh,
  780. u64 cpos, unsigned int len,
  781. struct ocfs2_refcount_rec *ret_rec,
  782. int *index,
  783. struct buffer_head **ret_bh)
  784. {
  785. int ret = 0, i, found;
  786. u32 low_cpos;
  787. struct ocfs2_extent_list *el;
  788. struct ocfs2_extent_rec *tmp, *rec = NULL;
  789. struct ocfs2_extent_block *eb;
  790. struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
  791. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  792. struct ocfs2_refcount_block *rb =
  793. (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  794. if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
  795. ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
  796. ret_rec, index);
  797. *ret_bh = ref_root_bh;
  798. get_bh(ref_root_bh);
  799. return 0;
  800. }
  801. el = &rb->rf_list;
  802. low_cpos = cpos & OCFS2_32BIT_POS_MASK;
  803. if (el->l_tree_depth) {
  804. ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
  805. if (ret) {
  806. mlog_errno(ret);
  807. goto out;
  808. }
  809. eb = (struct ocfs2_extent_block *) eb_bh->b_data;
  810. el = &eb->h_list;
		if (el->l_tree_depth) {
			ocfs2_error(sb,
				    "refcount tree %llu has non-zero tree "
				    "depth in leaf btree block %llu\n",
				    (unsigned long long)ocfs2_metadata_cache_owner(ci),
				    (unsigned long long)eb_bh->b_blocknr);
			ret = -EROFS;
			goto out;
		}
  820. }
  821. found = 0;
  822. for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
  823. rec = &el->l_recs[i];
  824. if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
  825. found = 1;
  826. break;
  827. }
  828. }
  829. /* adjust len when we have ocfs2_extent_rec after it. */
  830. if (found && i < le16_to_cpu(el->l_next_free_rec) - 1) {
  831. tmp = &el->l_recs[i+1];
  832. if (le32_to_cpu(tmp->e_cpos) < cpos + len)
  833. len = le32_to_cpu(tmp->e_cpos) - cpos;
  834. }
  835. ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
  836. &ref_leaf_bh);
  837. if (ret) {
  838. mlog_errno(ret);
  839. goto out;
  840. }
  841. ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
  842. ret_rec, index);
  843. *ret_bh = ref_leaf_bh;
  844. out:
  845. brelse(eb_bh);
  846. return ret;
  847. }
  848. enum ocfs2_ref_rec_contig {
  849. REF_CONTIG_NONE = 0,
  850. REF_CONTIG_LEFT,
  851. REF_CONTIG_RIGHT,
  852. REF_CONTIG_LEFTRIGHT,
  853. };
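/*
 * Two neighbouring records may be merged when they carry the same
 * refcount and are contiguous in cpos.
 */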
  854. static enum ocfs2_ref_rec_contig
  855. ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
  856. int index)
  857. {
  858. if ((rb->rf_records.rl_recs[index].r_refcount ==
  859. rb->rf_records.rl_recs[index + 1].r_refcount) &&
  860. (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
  861. le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
  862. le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
  863. return REF_CONTIG_RIGHT;
  864. return REF_CONTIG_NONE;
  865. }
  866. static enum ocfs2_ref_rec_contig
  867. ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
  868. int index)
  869. {
  870. enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;
  871. if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
  872. ret = ocfs2_refcount_rec_adjacent(rb, index);
  873. if (index > 0) {
  874. enum ocfs2_ref_rec_contig tmp;
  875. tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);
  876. if (tmp == REF_CONTIG_RIGHT) {
  877. if (ret == REF_CONTIG_RIGHT)
  878. ret = REF_CONTIG_LEFTRIGHT;
  879. else
  880. ret = REF_CONTIG_LEFT;
  881. }
  882. }
  883. return ret;
  884. }
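/*
 * Fold record index + 1 into record index (they must share the same
 * refcount) and close the resulting gap in the record list.
 */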
  885. static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
  886. int index)
  887. {
  888. BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
  889. rb->rf_records.rl_recs[index+1].r_refcount);
  890. le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
  891. le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));
  892. if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
  893. memmove(&rb->rf_records.rl_recs[index + 1],
  894. &rb->rf_records.rl_recs[index + 2],
  895. sizeof(struct ocfs2_refcount_rec) *
  896. (le16_to_cpu(rb->rf_records.rl_used) - index - 2));
  897. memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
  898. 0, sizeof(struct ocfs2_refcount_rec));
  899. le16_add_cpu(&rb->rf_records.rl_used, -1);
  900. }
  901. /*
  902. * Merge the refcount rec if we are contiguous with the adjacent recs.
  903. */
  904. static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
  905. int index)
  906. {
  907. enum ocfs2_ref_rec_contig contig =
  908. ocfs2_refcount_rec_contig(rb, index);
  909. if (contig == REF_CONTIG_NONE)
  910. return;
  911. if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
  912. BUG_ON(index == 0);
  913. index--;
  914. }
  915. ocfs2_rotate_refcount_rec_left(rb, index);
  916. if (contig == REF_CONTIG_LEFTRIGHT)
  917. ocfs2_rotate_refcount_rec_left(rb, index);
  918. }
  919. /*
  920. * Change the refcount indexed by "index" in ref_bh.
  921. * If refcount reaches 0, remove it.
  922. */
  923. static int ocfs2_change_refcount_rec(handle_t *handle,
  924. struct ocfs2_caching_info *ci,
  925. struct buffer_head *ref_leaf_bh,
  926. int index, int change)
  927. {
  928. int ret;
  929. struct ocfs2_refcount_block *rb =
  930. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  931. struct ocfs2_refcount_list *rl = &rb->rf_records;
  932. struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];
  933. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  934. OCFS2_JOURNAL_ACCESS_WRITE);
  935. if (ret) {
  936. mlog_errno(ret);
  937. goto out;
  938. }
  939. mlog(0, "change index %d, old count %u, change %d\n", index,
  940. le32_to_cpu(rec->r_refcount), change);
  941. le32_add_cpu(&rec->r_refcount, change);
  942. if (!rec->r_refcount) {
  943. if (index != le16_to_cpu(rl->rl_used) - 1) {
  944. memmove(rec, rec + 1,
  945. (le16_to_cpu(rl->rl_used) - index - 1) *
  946. sizeof(struct ocfs2_refcount_rec));
  947. memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
  948. 0, sizeof(struct ocfs2_refcount_rec));
  949. }
  950. le16_add_cpu(&rl->rl_used, -1);
  951. } else
  952. ocfs2_refcount_rec_merge(rb, index);
  953. ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
  954. if (ret)
  955. mlog_errno(ret);
  956. out:
  957. return ret;
  958. }
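/*
 * Convert an inline refcount root into a one-level tree: allocate a new
 * leaf block, copy the root's record list into it, then turn the root's
 * record area into an extent list with a single entry pointing at the
 * new leaf.
 */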
  959. static int ocfs2_expand_inline_ref_root(handle_t *handle,
  960. struct ocfs2_caching_info *ci,
  961. struct buffer_head *ref_root_bh,
  962. struct buffer_head **ref_leaf_bh,
  963. struct ocfs2_alloc_context *meta_ac)
  964. {
  965. int ret;
  966. u16 suballoc_bit_start;
  967. u32 num_got;
  968. u64 blkno;
  969. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  970. struct buffer_head *new_bh = NULL;
  971. struct ocfs2_refcount_block *new_rb;
  972. struct ocfs2_refcount_block *root_rb =
  973. (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  974. ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
  975. OCFS2_JOURNAL_ACCESS_WRITE);
  976. if (ret) {
  977. mlog_errno(ret);
  978. goto out;
  979. }
  980. ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1,
  981. &suballoc_bit_start, &num_got,
  982. &blkno);
  983. if (ret) {
  984. mlog_errno(ret);
  985. goto out;
  986. }
  987. new_bh = sb_getblk(sb, blkno);
  988. if (new_bh == NULL) {
  989. ret = -EIO;
  990. mlog_errno(ret);
  991. goto out;
  992. }
  993. ocfs2_set_new_buffer_uptodate(ci, new_bh);
  994. ret = ocfs2_journal_access_rb(handle, ci, new_bh,
  995. OCFS2_JOURNAL_ACCESS_CREATE);
  996. if (ret) {
  997. mlog_errno(ret);
  998. goto out;
  999. }
	/*
	 * Initialize ocfs2_refcount_block.
	 * It should contain the same information as the old root,
	 * so just memcpy it and change the corresponding fields.
	 */
  1005. memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);
  1006. new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
  1007. new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
  1008. new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
  1009. new_rb->rf_blkno = cpu_to_le64(blkno);
  1010. new_rb->rf_cpos = cpu_to_le32(0);
  1011. new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
  1012. new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
  1013. ocfs2_journal_dirty(handle, new_bh);
  1014. /* Now change the root. */
  1015. memset(&root_rb->rf_list, 0, sb->s_blocksize -
  1016. offsetof(struct ocfs2_refcount_block, rf_list));
  1017. root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
  1018. root_rb->rf_clusters = cpu_to_le32(1);
  1019. root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
  1020. root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
  1021. root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
  1022. root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);
  1023. ocfs2_journal_dirty(handle, ref_root_bh);
  1024. mlog(0, "new leaf block %llu, used %u\n", (unsigned long long)blkno,
  1025. le16_to_cpu(new_rb->rf_records.rl_used));
  1026. *ref_leaf_bh = new_bh;
  1027. new_bh = NULL;
  1028. out:
  1029. brelse(new_bh);
  1030. return ret;
  1031. }
/*
 * Only the low 32 bits of r_cpos are used as the key in the refcount
 * b-tree, so that is what the helpers below compare.
 */
static inline u32 ocfs2_get_ref_rec_low_cpos(const struct ocfs2_refcount_rec *rec)
{
	return le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
}

static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
					   struct ocfs2_refcount_rec *next)
{
	if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
	    ocfs2_get_ref_rec_low_cpos(next))
		return 1;

	return 0;
}
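/*
 * sort() callbacks: order refcount records by the low 32-bit key or by
 * the full 64-bit r_cpos.
 */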
  1040. static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
  1041. {
  1042. const struct ocfs2_refcount_rec *l = a, *r = b;
  1043. u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
  1044. u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);
  1045. if (l_cpos > r_cpos)
  1046. return 1;
  1047. if (l_cpos < r_cpos)
  1048. return -1;
  1049. return 0;
  1050. }
  1051. static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
  1052. {
  1053. const struct ocfs2_refcount_rec *l = a, *r = b;
  1054. u64 l_cpos = le64_to_cpu(l->r_cpos);
  1055. u64 r_cpos = le64_to_cpu(r->r_cpos);
  1056. if (l_cpos > r_cpos)
  1057. return 1;
  1058. if (l_cpos < r_cpos)
  1059. return -1;
  1060. return 0;
  1061. }
  1062. static void swap_refcount_rec(void *a, void *b, int size)
  1063. {
  1064. struct ocfs2_refcount_rec *l = a, *r = b, tmp;
  1065. tmp = *(struct ocfs2_refcount_rec *)l;
  1066. *(struct ocfs2_refcount_rec *)l =
  1067. *(struct ocfs2_refcount_rec *)r;
  1068. *(struct ocfs2_refcount_rec *)r = tmp;
  1069. }
/*
 * The refcount records are ordered by their 64-bit cpos, but the low
 * 32 bits are used as the e_cpos in the b-tree, so we need to make sure
 * that the chosen position doesn't intersect with its neighbours.
 *
 * Note: the refcount block is already sorted by low 32-bit cpos, so just
 * try the middle position first and exit as soon as a good one is found.
 */
  1079. static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
  1080. u32 *split_pos, int *split_index)
  1081. {
  1082. int num_used = le16_to_cpu(rl->rl_used);
  1083. int delta, middle = num_used / 2;
  1084. for (delta = 0; delta < middle; delta++) {
  1085. /* Let's check delta earlier than middle */
  1086. if (ocfs2_refcount_rec_no_intersect(
  1087. &rl->rl_recs[middle - delta - 1],
  1088. &rl->rl_recs[middle - delta])) {
  1089. *split_index = middle - delta;
  1090. break;
  1091. }
  1092. /* For even counts, don't walk off the end */
  1093. if ((middle + delta + 1) == num_used)
  1094. continue;
  1095. /* Now try delta past middle */
  1096. if (ocfs2_refcount_rec_no_intersect(
  1097. &rl->rl_recs[middle + delta],
  1098. &rl->rl_recs[middle + delta + 1])) {
  1099. *split_index = middle + delta + 1;
  1100. break;
  1101. }
  1102. }
  1103. if (delta >= middle)
  1104. return -ENOSPC;
  1105. *split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
  1106. return 0;
  1107. }
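/*
 * Split a full leaf refcount block: sort by low cpos, pick a split point
 * that doesn't cut a 32-bit key range in half, move the upper records
 * into new_bh, then restore 64-bit cpos order in both blocks.
 * *split_cpos returns the key at which the new block starts.
 */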
  1108. static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
  1109. struct buffer_head *new_bh,
  1110. u32 *split_cpos)
  1111. {
  1112. int split_index = 0, num_moved, ret;
  1113. u32 cpos = 0;
  1114. struct ocfs2_refcount_block *rb =
  1115. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1116. struct ocfs2_refcount_list *rl = &rb->rf_records;
  1117. struct ocfs2_refcount_block *new_rb =
  1118. (struct ocfs2_refcount_block *)new_bh->b_data;
  1119. struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;
	mlog(0, "split old leaf refcount block %llu, count = %u, used = %u\n",
	     (unsigned long long)ref_leaf_bh->b_blocknr,
	     le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
	/*
	 * XXX: Improve this later.
	 * If we know the high 32 bits of all the cpos values are the same,
	 * there is no need to sort.
	 *
	 * In order to make the whole process safe, we do:
	 * 1. sort the entries by their low 32 bit cpos first so that we can
	 *    find the split cpos easily.
	 * 2. call ocfs2_insert_extent to insert the new refcount block.
	 * 3. move the refcount recs to the new block.
	 * 4. sort the entries by their 64 bit cpos.
	 * 5. dirty the new_rb and rb.
	 */
  1135. sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
  1136. sizeof(struct ocfs2_refcount_rec),
  1137. cmp_refcount_rec_by_low_cpos, swap_refcount_rec);
  1138. ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
  1139. if (ret) {
  1140. mlog_errno(ret);
  1141. return ret;
  1142. }
  1143. new_rb->rf_cpos = cpu_to_le32(cpos);
	/* Move refcount records starting from split_index to the new block. */
	num_moved = le16_to_cpu(rl->rl_used) - split_index;
	memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
	       num_moved * sizeof(struct ocfs2_refcount_rec));

	/* Ok, remove the entries we just moved over to the other block. */
	memset(&rl->rl_recs[split_index], 0,
	       num_moved * sizeof(struct ocfs2_refcount_rec));

	/* Change old and new rl_used accordingly. */
	le16_add_cpu(&rl->rl_used, -num_moved);
	new_rl->rl_used = cpu_to_le16(num_moved);
  1154. sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
  1155. sizeof(struct ocfs2_refcount_rec),
  1156. cmp_refcount_rec_by_cpos, swap_refcount_rec);
  1157. sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
  1158. sizeof(struct ocfs2_refcount_rec),
  1159. cmp_refcount_rec_by_cpos, swap_refcount_rec);
  1160. *split_cpos = cpos;
  1161. return 0;
  1162. }
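/*
 * Allocate and initialize a brand new leaf refcount block, move part of
 * ref_leaf_bh's records into it and insert it into the refcount b-tree
 * at the resulting cpos.
 */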
  1163. static int ocfs2_new_leaf_refcount_block(handle_t *handle,
  1164. struct ocfs2_caching_info *ci,
  1165. struct buffer_head *ref_root_bh,
  1166. struct buffer_head *ref_leaf_bh,
  1167. struct ocfs2_alloc_context *meta_ac)
  1168. {
  1169. int ret;
  1170. u16 suballoc_bit_start;
  1171. u32 num_got, new_cpos;
  1172. u64 blkno;
  1173. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1174. struct ocfs2_refcount_block *root_rb =
  1175. (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1176. struct buffer_head *new_bh = NULL;
  1177. struct ocfs2_refcount_block *new_rb;
  1178. struct ocfs2_extent_tree ref_et;
  1179. BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));
  1180. ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
  1181. OCFS2_JOURNAL_ACCESS_WRITE);
  1182. if (ret) {
  1183. mlog_errno(ret);
  1184. goto out;
  1185. }
  1186. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1187. OCFS2_JOURNAL_ACCESS_WRITE);
  1188. if (ret) {
  1189. mlog_errno(ret);
  1190. goto out;
  1191. }
  1192. ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1,
  1193. &suballoc_bit_start, &num_got,
  1194. &blkno);
  1195. if (ret) {
  1196. mlog_errno(ret);
  1197. goto out;
  1198. }
  1199. new_bh = sb_getblk(sb, blkno);
  1200. if (new_bh == NULL) {
  1201. ret = -EIO;
  1202. mlog_errno(ret);
  1203. goto out;
  1204. }
  1205. ocfs2_set_new_buffer_uptodate(ci, new_bh);
  1206. ret = ocfs2_journal_access_rb(handle, ci, new_bh,
  1207. OCFS2_JOURNAL_ACCESS_CREATE);
  1208. if (ret) {
  1209. mlog_errno(ret);
  1210. goto out;
  1211. }
  1212. /* Initialize ocfs2_refcount_block. */
  1213. new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
  1214. memset(new_rb, 0, sb->s_blocksize);
  1215. strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
  1216. new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
  1217. new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
  1218. new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
  1219. new_rb->rf_blkno = cpu_to_le64(blkno);
  1220. new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
  1221. new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
  1222. new_rb->rf_records.rl_count =
  1223. cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
  1224. new_rb->rf_generation = root_rb->rf_generation;
  1225. ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
  1226. if (ret) {
  1227. mlog_errno(ret);
  1228. goto out;
  1229. }
  1230. ocfs2_journal_dirty(handle, ref_leaf_bh);
  1231. ocfs2_journal_dirty(handle, new_bh);
  1232. ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);
  1233. mlog(0, "insert new leaf block %llu at %u\n",
  1234. (unsigned long long)new_bh->b_blocknr, new_cpos);
  1235. /* Insert the new leaf block with the specific offset cpos. */
  1236. ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
  1237. 1, 0, meta_ac);
  1238. if (ret)
  1239. mlog_errno(ret);
  1240. out:
  1241. brelse(new_bh);
  1242. return ret;
  1243. }
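/*
 * Make room for more refcount records: expand an inline root into a
 * b-tree first if necessary, then add another leaf block.
 */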
  1244. static int ocfs2_expand_refcount_tree(handle_t *handle,
  1245. struct ocfs2_caching_info *ci,
  1246. struct buffer_head *ref_root_bh,
  1247. struct buffer_head *ref_leaf_bh,
  1248. struct ocfs2_alloc_context *meta_ac)
  1249. {
  1250. int ret;
  1251. struct buffer_head *expand_bh = NULL;
  1252. if (ref_root_bh == ref_leaf_bh) {
  1253. /*
  1254. * the old root bh hasn't been expanded to a b-tree,
  1255. * so expand it first.
  1256. */
  1257. ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
  1258. &expand_bh, meta_ac);
  1259. if (ret) {
  1260. mlog_errno(ret);
  1261. goto out;
  1262. }
  1263. } else {
  1264. expand_bh = ref_leaf_bh;
  1265. get_bh(expand_bh);
  1266. }
  1267. /* Now add a new refcount block into the tree.*/
  1268. ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
  1269. expand_bh, meta_ac);
  1270. if (ret)
  1271. mlog_errno(ret);
  1272. out:
  1273. brelse(expand_bh);
  1274. return ret;
  1275. }
  1276. /*
  1277. * Adjust the extent rec in b-tree representing ref_leaf_bh.
  1278. *
  1279. * Only called when we have inserted a new refcount rec at index 0
  1280. * which means ocfs2_extent_rec.e_cpos may need some change.
  1281. */
  1282. static int ocfs2_adjust_refcount_rec(handle_t *handle,
  1283. struct ocfs2_caching_info *ci,
  1284. struct buffer_head *ref_root_bh,
  1285. struct buffer_head *ref_leaf_bh,
  1286. struct ocfs2_refcount_rec *rec)
  1287. {
  1288. int ret = 0, i;
  1289. u32 new_cpos, old_cpos;
  1290. struct ocfs2_path *path = NULL;
  1291. struct ocfs2_extent_tree et;
  1292. struct ocfs2_refcount_block *rb =
  1293. (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1294. struct ocfs2_extent_list *el;
  1295. if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
  1296. goto out;
  1297. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1298. old_cpos = le32_to_cpu(rb->rf_cpos);
  1299. new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
  1300. if (old_cpos <= new_cpos)
  1301. goto out;
  1302. ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
  1303. path = ocfs2_new_path_from_et(&et);
  1304. if (!path) {
  1305. ret = -ENOMEM;
  1306. mlog_errno(ret);
  1307. goto out;
  1308. }
  1309. ret = ocfs2_find_path(ci, path, old_cpos);
  1310. if (ret) {
  1311. mlog_errno(ret);
  1312. goto out;
  1313. }
  1314. /*
  1315. * 2 more credits, one for the leaf refcount block, one for
1316. * the extent block containing the extent rec.
  1317. */
  1318. ret = ocfs2_extend_trans(handle, handle->h_buffer_credits + 2);
  1319. if (ret < 0) {
  1320. mlog_errno(ret);
  1321. goto out;
  1322. }
  1323. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1324. OCFS2_JOURNAL_ACCESS_WRITE);
  1325. if (ret < 0) {
  1326. mlog_errno(ret);
  1327. goto out;
  1328. }
  1329. ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
  1330. OCFS2_JOURNAL_ACCESS_WRITE);
  1331. if (ret < 0) {
  1332. mlog_errno(ret);
  1333. goto out;
  1334. }
  1335. /* change the leaf extent block first. */
  1336. el = path_leaf_el(path);
  1337. for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
  1338. if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
  1339. break;
  1340. BUG_ON(i == le16_to_cpu(el->l_next_free_rec));
  1341. el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
  1342. /* change the r_cpos in the leaf block. */
  1343. rb->rf_cpos = cpu_to_le32(new_cpos);
  1344. ocfs2_journal_dirty(handle, path_leaf_bh(path));
  1345. ocfs2_journal_dirty(handle, ref_leaf_bh);
  1346. out:
  1347. ocfs2_free_path(path);
  1348. return ret;
  1349. }
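/*
 * Insert a new refcount record at "index" in the leaf pointed to by
 * ref_leaf_bh. If the leaf is already full, expand the tree first and
 * re-look up the leaf that now covers rec->r_cpos. An insert at index 0
 * may also require adjusting the extent record via
 * ocfs2_adjust_refcount_rec().
 */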
  1350. static int ocfs2_insert_refcount_rec(handle_t *handle,
  1351. struct ocfs2_caching_info *ci,
  1352. struct buffer_head *ref_root_bh,
  1353. struct buffer_head *ref_leaf_bh,
  1354. struct ocfs2_refcount_rec *rec,
  1355. int index,
  1356. struct ocfs2_alloc_context *meta_ac)
  1357. {
  1358. int ret;
  1359. struct ocfs2_refcount_block *rb =
  1360. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1361. struct ocfs2_refcount_list *rf_list = &rb->rf_records;
  1362. struct buffer_head *new_bh = NULL;
  1363. BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
  1364. if (rf_list->rl_used == rf_list->rl_count) {
  1365. u64 cpos = le64_to_cpu(rec->r_cpos);
  1366. u32 len = le32_to_cpu(rec->r_clusters);
  1367. ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
  1368. ref_leaf_bh, meta_ac);
  1369. if (ret) {
  1370. mlog_errno(ret);
  1371. goto out;
  1372. }
  1373. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1374. cpos, len, NULL, &index,
  1375. &new_bh);
  1376. if (ret) {
  1377. mlog_errno(ret);
  1378. goto out;
  1379. }
  1380. ref_leaf_bh = new_bh;
  1381. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1382. rf_list = &rb->rf_records;
  1383. }
  1384. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1385. OCFS2_JOURNAL_ACCESS_WRITE);
  1386. if (ret) {
  1387. mlog_errno(ret);
  1388. goto out;
  1389. }
  1390. if (index < le16_to_cpu(rf_list->rl_used))
  1391. memmove(&rf_list->rl_recs[index + 1],
  1392. &rf_list->rl_recs[index],
  1393. (le16_to_cpu(rf_list->rl_used) - index) *
  1394. sizeof(struct ocfs2_refcount_rec));
  1395. mlog(0, "insert refcount record start %llu, len %u, count %u "
  1396. "to leaf block %llu at index %d\n",
  1397. (unsigned long long)le64_to_cpu(rec->r_cpos),
  1398. le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount),
  1399. (unsigned long long)ref_leaf_bh->b_blocknr, index);
  1400. rf_list->rl_recs[index] = *rec;
  1401. le16_add_cpu(&rf_list->rl_used, 1);
  1402. ocfs2_refcount_rec_merge(rb, index);
  1403. ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
  1404. if (ret) {
  1405. mlog_errno(ret);
  1406. goto out;
  1407. }
  1408. if (index == 0) {
  1409. ret = ocfs2_adjust_refcount_rec(handle, ci,
  1410. ref_root_bh,
  1411. ref_leaf_bh, rec);
  1412. if (ret)
  1413. mlog_errno(ret);
  1414. }
  1415. out:
  1416. brelse(new_bh);
  1417. return ret;
  1418. }
  1419. /*
  1420. * Split the refcount_rec indexed by "index" in ref_leaf_bh.
1421. * This is much simpler than our b-tree code.
1422. * split_rec is the new refcount rec we want to insert.
1423. * If split_rec->r_refcount > 0, we are changing the refcount (in case we
1424. * increase a refcount or decrease it to a non-zero value).
1425. * If split_rec->r_refcount == 0, we are punching a hole in the current
1426. * refcount rec (in case we decrease a refcount to zero).
  1427. */
  1428. static int ocfs2_split_refcount_rec(handle_t *handle,
  1429. struct ocfs2_caching_info *ci,
  1430. struct buffer_head *ref_root_bh,
  1431. struct buffer_head *ref_leaf_bh,
  1432. struct ocfs2_refcount_rec *split_rec,
  1433. int index,
  1434. struct ocfs2_alloc_context *meta_ac,
  1435. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1436. {
  1437. int ret, recs_need;
  1438. u32 len;
  1439. struct ocfs2_refcount_block *rb =
  1440. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1441. struct ocfs2_refcount_list *rf_list = &rb->rf_records;
  1442. struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
  1443. struct ocfs2_refcount_rec *tail_rec = NULL;
  1444. struct buffer_head *new_bh = NULL;
  1445. BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
  1446. mlog(0, "original r_pos %llu, cluster %u, split %llu, cluster %u\n",
  1447. le64_to_cpu(orig_rec->r_cpos), le32_to_cpu(orig_rec->r_clusters),
  1448. le64_to_cpu(split_rec->r_cpos),
  1449. le32_to_cpu(split_rec->r_clusters));
  1450. /*
  1451. * If we just need to split the header or tail clusters,
1452. * no more recs are needed, the split alone is enough.
1453. * Otherwise we need at least one new rec.
  1454. */
  1455. if (!split_rec->r_refcount &&
  1456. (split_rec->r_cpos == orig_rec->r_cpos ||
  1457. le64_to_cpu(split_rec->r_cpos) +
  1458. le32_to_cpu(split_rec->r_clusters) ==
  1459. le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
  1460. recs_need = 0;
  1461. else
  1462. recs_need = 1;
  1463. /*
1464. * We need one more rec if we split in the middle and the new rec has
  1465. * some refcount in it.
  1466. */
  1467. if (split_rec->r_refcount &&
  1468. (split_rec->r_cpos != orig_rec->r_cpos &&
  1469. le64_to_cpu(split_rec->r_cpos) +
  1470. le32_to_cpu(split_rec->r_clusters) !=
  1471. le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
  1472. recs_need++;
1473. /* If the leaf block doesn't have enough records, expand it. */
1474. if (le16_to_cpu(rf_list->rl_used) + recs_need > le16_to_cpu(rf_list->rl_count)) {
  1475. struct ocfs2_refcount_rec tmp_rec;
  1476. u64 cpos = le64_to_cpu(orig_rec->r_cpos);
  1477. len = le32_to_cpu(orig_rec->r_clusters);
  1478. ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
  1479. ref_leaf_bh, meta_ac);
  1480. if (ret) {
  1481. mlog_errno(ret);
  1482. goto out;
  1483. }
  1484. /*
  1485. * We have to re-get it since now cpos may be moved to
  1486. * another leaf block.
  1487. */
  1488. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1489. cpos, len, &tmp_rec, &index,
  1490. &new_bh);
  1491. if (ret) {
  1492. mlog_errno(ret);
  1493. goto out;
  1494. }
  1495. ref_leaf_bh = new_bh;
  1496. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1497. rf_list = &rb->rf_records;
  1498. orig_rec = &rf_list->rl_recs[index];
  1499. }
  1500. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1501. OCFS2_JOURNAL_ACCESS_WRITE);
  1502. if (ret) {
  1503. mlog_errno(ret);
  1504. goto out;
  1505. }
  1506. /*
1507. * We have calculated how many new records we need and stored
1508. * the number in recs_need, so make enough room first by moving
1509. * the records after "index" toward the end.
  1510. */
  1511. if (index != le16_to_cpu(rf_list->rl_used) - 1)
  1512. memmove(&rf_list->rl_recs[index + 1 + recs_need],
  1513. &rf_list->rl_recs[index + 1],
  1514. (le16_to_cpu(rf_list->rl_used) - index - 1) *
  1515. sizeof(struct ocfs2_refcount_rec));
  1516. len = (le64_to_cpu(orig_rec->r_cpos) +
  1517. le32_to_cpu(orig_rec->r_clusters)) -
  1518. (le64_to_cpu(split_rec->r_cpos) +
  1519. le32_to_cpu(split_rec->r_clusters));
  1520. /*
1521. * If we have "len", then we will split at the tail and move it
1522. * to the end of the space we have just made.
  1523. */
  1524. if (len) {
  1525. tail_rec = &rf_list->rl_recs[index + recs_need];
  1526. memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
  1527. le64_add_cpu(&tail_rec->r_cpos,
  1528. le32_to_cpu(tail_rec->r_clusters) - len);
1529. tail_rec->r_clusters = cpu_to_le32(len);
  1530. }
  1531. /*
  1532. * If the split pos isn't the same as the original one, we need to
  1533. * split in the head.
  1534. *
  1535. * Note: We have the chance that split_rec.r_refcount = 0,
  1536. * recs_need = 0 and len > 0, which means we just cut the head from
  1537. * the orig_rec and in that case we have done some modification in
1538. * orig_rec above, so the check against r_cpos alone is not reliable.
  1539. */
  1540. if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
  1541. len = le64_to_cpu(split_rec->r_cpos) -
  1542. le64_to_cpu(orig_rec->r_cpos);
  1543. orig_rec->r_clusters = cpu_to_le32(len);
  1544. index++;
  1545. }
  1546. le16_add_cpu(&rf_list->rl_used, recs_need);
  1547. if (split_rec->r_refcount) {
  1548. rf_list->rl_recs[index] = *split_rec;
  1549. mlog(0, "insert refcount record start %llu, len %u, count %u "
  1550. "to leaf block %llu at index %d\n",
  1551. (unsigned long long)le64_to_cpu(split_rec->r_cpos),
  1552. le32_to_cpu(split_rec->r_clusters),
  1553. le32_to_cpu(split_rec->r_refcount),
  1554. (unsigned long long)ref_leaf_bh->b_blocknr, index);
  1555. ocfs2_refcount_rec_merge(rb, index);
  1556. }
  1557. ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
  1558. if (ret)
  1559. mlog_errno(ret);
  1560. out:
  1561. brelse(new_bh);
  1562. return ret;
  1563. }
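/*
 * Increase the refcount of [cpos, cpos + len) by one, walking the range
 * record by record: bump a record that already covers the chunk, insert
 * a new record with refcount 1 into a hole, or split a partially
 * overlapping record.
 */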
  1564. static int __ocfs2_increase_refcount(handle_t *handle,
  1565. struct ocfs2_caching_info *ci,
  1566. struct buffer_head *ref_root_bh,
  1567. u64 cpos, u32 len,
  1568. struct ocfs2_alloc_context *meta_ac,
  1569. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1570. {
  1571. int ret = 0, index;
  1572. struct buffer_head *ref_leaf_bh = NULL;
  1573. struct ocfs2_refcount_rec rec;
  1574. unsigned int set_len = 0;
  1575. mlog(0, "Tree owner %llu, add refcount start %llu, len %u\n",
  1576. (unsigned long long)ocfs2_metadata_cache_owner(ci),
  1577. (unsigned long long)cpos, len);
  1578. while (len) {
  1579. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1580. cpos, len, &rec, &index,
  1581. &ref_leaf_bh);
  1582. if (ret) {
  1583. mlog_errno(ret);
  1584. goto out;
  1585. }
  1586. set_len = le32_to_cpu(rec.r_clusters);
  1587. /*
1588. * Here we may hit one of 3 situations:
1589. *
1590. * 1. If we find an existing record that starts at cpos and fits
1591. * within the requested range, we just need to increase its
1592. * r_refcount and we are done with this chunk.
  1593. * 2. If we find a hole, just insert it with r_refcount = 1.
  1594. * 3. If we are in the middle of one extent record, split
  1595. * it.
  1596. */
  1597. if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
  1598. set_len <= len) {
  1599. mlog(0, "increase refcount rec, start %llu, len %u, "
  1600. "count %u\n", (unsigned long long)cpos, set_len,
  1601. le32_to_cpu(rec.r_refcount));
  1602. ret = ocfs2_change_refcount_rec(handle, ci,
  1603. ref_leaf_bh, index, 1);
  1604. if (ret) {
  1605. mlog_errno(ret);
  1606. goto out;
  1607. }
  1608. } else if (!rec.r_refcount) {
  1609. rec.r_refcount = cpu_to_le32(1);
  1610. mlog(0, "insert refcount rec, start %llu, len %u\n",
  1611. (unsigned long long)le64_to_cpu(rec.r_cpos),
  1612. set_len);
  1613. ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
  1614. ref_leaf_bh,
  1615. &rec, index, meta_ac);
  1616. if (ret) {
  1617. mlog_errno(ret);
  1618. goto out;
  1619. }
  1620. } else {
  1621. set_len = min((u64)(cpos + len),
  1622. le64_to_cpu(rec.r_cpos) + set_len) - cpos;
  1623. rec.r_cpos = cpu_to_le64(cpos);
  1624. rec.r_clusters = cpu_to_le32(set_len);
  1625. le32_add_cpu(&rec.r_refcount, 1);
  1626. mlog(0, "split refcount rec, start %llu, "
  1627. "len %u, count %u\n",
  1628. (unsigned long long)le64_to_cpu(rec.r_cpos),
  1629. set_len, le32_to_cpu(rec.r_refcount));
  1630. ret = ocfs2_split_refcount_rec(handle, ci,
  1631. ref_root_bh, ref_leaf_bh,
  1632. &rec, index,
  1633. meta_ac, dealloc);
  1634. if (ret) {
  1635. mlog_errno(ret);
  1636. goto out;
  1637. }
  1638. }
  1639. cpos += set_len;
  1640. len -= set_len;
  1641. brelse(ref_leaf_bh);
  1642. ref_leaf_bh = NULL;
  1643. }
  1644. out:
  1645. brelse(ref_leaf_bh);
  1646. return ret;
  1647. }
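/*
 * Remove an empty refcount leaf block from the tree: delete its extent
 * record, queue the block itself for dealloc, and shrink the root back
 * to an inline record block if no leaf extents remain.
 */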
  1648. static int ocfs2_remove_refcount_extent(handle_t *handle,
  1649. struct ocfs2_caching_info *ci,
  1650. struct buffer_head *ref_root_bh,
  1651. struct buffer_head *ref_leaf_bh,
  1652. struct ocfs2_alloc_context *meta_ac,
  1653. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1654. {
  1655. int ret;
  1656. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1657. struct ocfs2_refcount_block *rb =
  1658. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1659. struct ocfs2_extent_tree et;
  1660. BUG_ON(rb->rf_records.rl_used);
  1661. ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
  1662. ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
  1663. 1, meta_ac, dealloc);
  1664. if (ret) {
  1665. mlog_errno(ret);
  1666. goto out;
  1667. }
  1668. ocfs2_remove_from_cache(ci, ref_leaf_bh);
  1669. /*
  1670. * add the freed block to the dealloc so that it will be freed
  1671. * when we run dealloc.
  1672. */
  1673. ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
  1674. le16_to_cpu(rb->rf_suballoc_slot),
  1675. le64_to_cpu(rb->rf_blkno),
  1676. le16_to_cpu(rb->rf_suballoc_bit));
  1677. if (ret) {
  1678. mlog_errno(ret);
  1679. goto out;
  1680. }
  1681. ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
  1682. OCFS2_JOURNAL_ACCESS_WRITE);
  1683. if (ret) {
  1684. mlog_errno(ret);
  1685. goto out;
  1686. }
  1687. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1688. le32_add_cpu(&rb->rf_clusters, -1);
  1689. /*
  1690. * check whether we need to restore the root refcount block if
1691. * there is no leaf extent block at all.
  1692. */
  1693. if (!rb->rf_list.l_next_free_rec) {
  1694. BUG_ON(rb->rf_clusters);
  1695. mlog(0, "reset refcount tree root %llu to be a record block.\n",
  1696. (unsigned long long)ref_root_bh->b_blocknr);
  1697. rb->rf_flags = 0;
  1698. rb->rf_parent = 0;
  1699. rb->rf_cpos = 0;
  1700. memset(&rb->rf_records, 0, sb->s_blocksize -
  1701. offsetof(struct ocfs2_refcount_block, rf_records));
  1702. rb->rf_records.rl_count =
  1703. cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
  1704. }
  1705. ocfs2_journal_dirty(handle, ref_root_bh);
  1706. out:
  1707. return ret;
  1708. }
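/*
 * Decrease the refcount of [cpos, cpos + len) within the record at
 * "index", either in place or by splitting the record, and remove the
 * leaf block if it ends up with no records.
 */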
  1709. static int ocfs2_decrease_refcount_rec(handle_t *handle,
  1710. struct ocfs2_caching_info *ci,
  1711. struct buffer_head *ref_root_bh,
  1712. struct buffer_head *ref_leaf_bh,
  1713. int index, u64 cpos, unsigned int len,
  1714. struct ocfs2_alloc_context *meta_ac,
  1715. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1716. {
  1717. int ret;
  1718. struct ocfs2_refcount_block *rb =
  1719. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1720. struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];
  1721. BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
  1722. BUG_ON(cpos + len >
  1723. le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));
  1724. if (cpos == le64_to_cpu(rec->r_cpos) &&
  1725. len == le32_to_cpu(rec->r_clusters))
  1726. ret = ocfs2_change_refcount_rec(handle, ci,
  1727. ref_leaf_bh, index, -1);
  1728. else {
  1729. struct ocfs2_refcount_rec split = *rec;
  1730. split.r_cpos = cpu_to_le64(cpos);
  1731. split.r_clusters = cpu_to_le32(len);
  1732. le32_add_cpu(&split.r_refcount, -1);
  1733. mlog(0, "split refcount rec, start %llu, "
  1734. "len %u, count %u, original start %llu, len %u\n",
  1735. (unsigned long long)le64_to_cpu(split.r_cpos),
  1736. len, le32_to_cpu(split.r_refcount),
  1737. (unsigned long long)le64_to_cpu(rec->r_cpos),
  1738. le32_to_cpu(rec->r_clusters));
  1739. ret = ocfs2_split_refcount_rec(handle, ci,
  1740. ref_root_bh, ref_leaf_bh,
  1741. &split, index,
  1742. meta_ac, dealloc);
  1743. }
  1744. if (ret) {
  1745. mlog_errno(ret);
  1746. goto out;
  1747. }
  1748. /* Remove the leaf refcount block if it contains no refcount record. */
  1749. if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
  1750. ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh,
  1751. ref_leaf_bh, meta_ac,
  1752. dealloc);
  1753. if (ret)
  1754. mlog_errno(ret);
  1755. }
  1756. out:
  1757. return ret;
  1758. }
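/*
 * Decrease the refcount of [cpos, cpos + len) by one, record by record.
 * Clusters whose refcount drops from 1 to 0 are queued in "dealloc" so
 * they can be freed later.
 */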
  1759. static int __ocfs2_decrease_refcount(handle_t *handle,
  1760. struct ocfs2_caching_info *ci,
  1761. struct buffer_head *ref_root_bh,
  1762. u64 cpos, u32 len,
  1763. struct ocfs2_alloc_context *meta_ac,
  1764. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1765. {
  1766. int ret = 0, index = 0;
  1767. struct ocfs2_refcount_rec rec;
  1768. unsigned int r_count = 0, r_len;
  1769. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1770. struct buffer_head *ref_leaf_bh = NULL;
  1771. mlog(0, "Tree owner %llu, decrease refcount start %llu, len %u\n",
  1772. (unsigned long long)ocfs2_metadata_cache_owner(ci),
  1773. (unsigned long long)cpos, len);
  1774. while (len) {
  1775. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1776. cpos, len, &rec, &index,
  1777. &ref_leaf_bh);
  1778. if (ret) {
  1779. mlog_errno(ret);
  1780. goto out;
  1781. }
  1782. r_count = le32_to_cpu(rec.r_refcount);
  1783. BUG_ON(r_count == 0);
  1784. r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
  1785. le32_to_cpu(rec.r_clusters)) - cpos;
  1786. ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh,
  1787. ref_leaf_bh, index,
  1788. cpos, r_len,
  1789. meta_ac, dealloc);
  1790. if (ret) {
  1791. mlog_errno(ret);
  1792. goto out;
  1793. }
  1794. if (le32_to_cpu(rec.r_refcount) == 1) {
  1795. ret = ocfs2_cache_cluster_dealloc(dealloc,
  1796. ocfs2_clusters_to_blocks(sb, cpos),
  1797. r_len);
  1798. if (ret) {
  1799. mlog_errno(ret);
  1800. goto out;
  1801. }
  1802. }
  1803. cpos += r_len;
  1804. len -= r_len;
  1805. brelse(ref_leaf_bh);
  1806. ref_leaf_bh = NULL;
  1807. }
  1808. out:
  1809. brelse(ref_leaf_bh);
  1810. return ret;
  1811. }
  1812. /* Caller must hold refcount tree lock. */
  1813. int ocfs2_decrease_refcount(struct inode *inode,
  1814. handle_t *handle, u32 cpos, u32 len,
  1815. struct ocfs2_alloc_context *meta_ac,
  1816. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1817. {
  1818. int ret;
  1819. u64 ref_blkno;
  1820. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  1821. struct buffer_head *ref_root_bh = NULL;
  1822. struct ocfs2_refcount_tree *tree;
  1823. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  1824. ret = ocfs2_get_refcount_block(inode, &ref_blkno);
  1825. if (ret) {
  1826. mlog_errno(ret);
  1827. goto out;
  1828. }
  1829. ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree);
  1830. if (ret) {
  1831. mlog_errno(ret);
  1832. goto out;
  1833. }
  1834. ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
  1835. &ref_root_bh);
  1836. if (ret) {
  1837. mlog_errno(ret);
  1838. goto out;
  1839. }
  1840. ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh,
  1841. cpos, len, meta_ac, dealloc);
  1842. if (ret)
  1843. mlog_errno(ret);
  1844. out:
  1845. brelse(ref_root_bh);
  1846. return ret;
  1847. }
  1848. /*
  1849. * Mark the already-existing extent at cpos as refcounted for len clusters.
  1850. * This adds the refcount extent flag.
  1851. *
  1852. * If the existing extent is larger than the request, initiate a
  1853. * split. An attempt will be made at merging with adjacent extents.
  1854. *
  1855. * The caller is responsible for passing down meta_ac if we'll need it.
  1856. */
  1857. static int ocfs2_mark_extent_refcounted(struct inode *inode,
  1858. struct ocfs2_extent_tree *et,
  1859. handle_t *handle, u32 cpos,
  1860. u32 len, u32 phys,
  1861. struct ocfs2_alloc_context *meta_ac,
  1862. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1863. {
  1864. int ret;
  1865. mlog(0, "Inode %lu refcount tree cpos %u, len %u, phys cluster %u\n",
  1866. inode->i_ino, cpos, len, phys);
  1867. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
  1868. ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
  1869. "tree, but the feature bit is not set in the "
  1870. "super block.", inode->i_ino);
  1871. ret = -EROFS;
  1872. goto out;
  1873. }
  1874. ret = ocfs2_change_extent_flag(handle, et, cpos,
  1875. len, phys, meta_ac, dealloc,
  1876. OCFS2_EXT_REFCOUNTED, 0);
  1877. if (ret)
  1878. mlog_errno(ret);
  1879. out:
  1880. return ret;
  1881. }
  1882. /*
  1883. * Given some contiguous physical clusters, calculate what we need
  1884. * for modifying their refcount.
  1885. */
  1886. static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
  1887. struct ocfs2_caching_info *ci,
  1888. struct buffer_head *ref_root_bh,
  1889. u64 start_cpos,
  1890. u32 clusters,
  1891. int *meta_add,
  1892. int *credits)
  1893. {
  1894. int ret = 0, index, ref_blocks = 0, recs_add = 0;
  1895. u64 cpos = start_cpos;
  1896. struct ocfs2_refcount_block *rb;
  1897. struct ocfs2_refcount_rec rec;
  1898. struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
  1899. u32 len;
  1900. mlog(0, "start_cpos %llu, clusters %u\n",
  1901. (unsigned long long)start_cpos, clusters);
  1902. while (clusters) {
  1903. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1904. cpos, clusters, &rec,
  1905. &index, &ref_leaf_bh);
  1906. if (ret) {
  1907. mlog_errno(ret);
  1908. goto out;
  1909. }
  1910. if (ref_leaf_bh != prev_bh) {
  1911. /*
  1912. * Now we encounter a new leaf block, so calculate
  1913. * whether we need to extend the old leaf.
  1914. */
  1915. if (prev_bh) {
  1916. rb = (struct ocfs2_refcount_block *)
  1917. prev_bh->b_data;
1918. if (le16_to_cpu(rb->rf_records.rl_used) +
  1919. recs_add >
  1920. le16_to_cpu(rb->rf_records.rl_count))
  1921. ref_blocks++;
  1922. }
  1923. recs_add = 0;
  1924. *credits += 1;
  1925. brelse(prev_bh);
  1926. prev_bh = ref_leaf_bh;
  1927. get_bh(prev_bh);
  1928. }
  1929. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1930. mlog(0, "recs_add %d,cpos %llu, clusters %u, rec->r_cpos %llu,"
  1931. "rec->r_clusters %u, rec->r_refcount %u, index %d\n",
  1932. recs_add, (unsigned long long)cpos, clusters,
  1933. (unsigned long long)le64_to_cpu(rec.r_cpos),
  1934. le32_to_cpu(rec.r_clusters),
  1935. le32_to_cpu(rec.r_refcount), index);
  1936. len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
  1937. le32_to_cpu(rec.r_clusters)) - cpos;
  1938. /*
1939. * If the refcount rec already exists, cool. We just need
1940. * to check whether there is a split. Otherwise we just need
1941. * to increase the refcount.
1942. * If we will insert one, increase recs_add.
  1943. *
  1944. * We record all the records which will be inserted to the
  1945. * same refcount block, so that we can tell exactly whether
  1946. * we need a new refcount block or not.
  1947. */
  1948. if (rec.r_refcount) {
  1949. /* Check whether we need a split at the beginning. */
  1950. if (cpos == start_cpos &&
  1951. cpos != le64_to_cpu(rec.r_cpos))
  1952. recs_add++;
  1953. /* Check whether we need a split in the end. */
  1954. if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
  1955. le32_to_cpu(rec.r_clusters))
  1956. recs_add++;
  1957. } else
  1958. recs_add++;
  1959. brelse(ref_leaf_bh);
  1960. ref_leaf_bh = NULL;
  1961. clusters -= len;
  1962. cpos += len;
  1963. }
  1964. if (prev_bh) {
  1965. rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
1966. if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
  1967. le16_to_cpu(rb->rf_records.rl_count))
  1968. ref_blocks++;
  1969. *credits += 1;
  1970. }
  1971. if (!ref_blocks)
  1972. goto out;
  1973. mlog(0, "we need ref_blocks %d\n", ref_blocks);
  1974. *meta_add += ref_blocks;
  1975. *credits += ref_blocks;
  1976. /*
  1977. * So we may need ref_blocks to insert into the tree.
  1978. * That also means we need to change the b-tree and add that number
  1979. * of records since we never merge them.
1980. * We need one more block for expansion since the newly created leaf
1981. * block is also full and needs a split.
  1982. */
  1983. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1984. if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
  1985. struct ocfs2_extent_tree et;
  1986. ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
  1987. *meta_add += ocfs2_extend_meta_needed(et.et_root_el);
  1988. *credits += ocfs2_calc_extend_credits(sb,
  1989. et.et_root_el,
  1990. ref_blocks);
  1991. } else {
  1992. *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
  1993. *meta_add += 1;
  1994. }
  1995. out:
  1996. brelse(ref_leaf_bh);
  1997. brelse(prev_bh);
  1998. return ret;
  1999. }
  2000. /*
2001. * For a refcount tree, we will decrease the refcount of some
2002. * contiguous clusters, so just go through them to see how many
2003. * blocks we are going to touch and whether we need to create new blocks.
2004. *
2005. * Normally the refcount blocks storing these refcounts should be
2006. * contiguous as well, so we can get the number easily.
2007. * As for meta_ac, we will at most split 2 refcount records and add
2008. * 2 more refcount blocks, so just check it in a rough way.
  2009. *
  2010. * Caller must hold refcount tree lock.
  2011. */
  2012. int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
  2013. struct buffer_head *di_bh,
  2014. u64 phys_blkno,
  2015. u32 clusters,
  2016. int *credits,
  2017. struct ocfs2_alloc_context **meta_ac)
  2018. {
  2019. int ret, ref_blocks = 0;
  2020. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  2021. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  2022. struct buffer_head *ref_root_bh = NULL;
  2023. struct ocfs2_refcount_tree *tree;
  2024. u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);
  2025. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
  2026. ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
  2027. "tree, but the feature bit is not set in the "
  2028. "super block.", inode->i_ino);
  2029. ret = -EROFS;
  2030. goto out;
  2031. }
  2032. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  2033. ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
  2034. le64_to_cpu(di->i_refcount_loc), &tree);
  2035. if (ret) {
  2036. mlog_errno(ret);
  2037. goto out;
  2038. }
  2039. ret = ocfs2_read_refcount_block(&tree->rf_ci,
  2040. le64_to_cpu(di->i_refcount_loc),
  2041. &ref_root_bh);
  2042. if (ret) {
  2043. mlog_errno(ret);
  2044. goto out;
  2045. }
  2046. ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
  2047. &tree->rf_ci,
  2048. ref_root_bh,
  2049. start_cpos, clusters,
  2050. &ref_blocks, credits);
  2051. if (ret) {
  2052. mlog_errno(ret);
  2053. goto out;
  2054. }
  2055. mlog(0, "reserve new metadata %d, credits = %d\n",
  2056. ref_blocks, *credits);
  2057. if (ref_blocks) {
  2058. ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
  2059. ref_blocks, meta_ac);
  2060. if (ret)
  2061. mlog_errno(ret);
  2062. }
  2063. out:
  2064. brelse(ref_root_bh);
  2065. return ret;
  2066. }
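/*
 * Extents touched by CoW are broken up on MAX_CONTIG_BYTES boundaries
 * so that the resulting extent tree still gives good I/O.
 */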
  2067. #define MAX_CONTIG_BYTES 1048576
  2068. static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb)
  2069. {
  2070. return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES);
  2071. }
  2072. static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb)
  2073. {
  2074. return ~(ocfs2_cow_contig_clusters(sb) - 1);
  2075. }
  2076. /*
  2077. * Given an extent that starts at 'start' and an I/O that starts at 'cpos',
  2078. * find an offset (start + (n * contig_clusters)) that is closest to cpos
  2079. * while still being less than or equal to it.
  2080. *
  2081. * The goal is to break the extent at a multiple of contig_clusters.
  2082. */
  2083. static inline unsigned int ocfs2_cow_align_start(struct super_block *sb,
  2084. unsigned int start,
  2085. unsigned int cpos)
  2086. {
  2087. BUG_ON(start > cpos);
  2088. return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
  2089. }
  2090. /*
  2091. * Given a cluster count of len, pad it out so that it is a multiple
  2092. * of contig_clusters.
  2093. */
  2094. static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
  2095. unsigned int len)
  2096. {
  2097. unsigned int padded =
  2098. (len + (ocfs2_cow_contig_clusters(sb) - 1)) &
  2099. ocfs2_cow_contig_mask(sb);
  2100. /* Did we wrap? */
  2101. if (padded < len)
  2102. padded = UINT_MAX;
  2103. return padded;
  2104. }
  2105. /*
2106. * Calculate the start and the number of virtual clusters we need to CoW.
2107. *
2108. * cpos is the virtual start cluster position at which we want to do CoW
2109. * in a file and write_len is the cluster length.
2110. *
2111. * Normally we will start the CoW from the beginning of the extent record containing cpos.
  2112. * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we
  2113. * get good I/O from the resulting extent tree.
  2114. */
  2115. static int ocfs2_refcount_cal_cow_clusters(struct inode *inode,
  2116. struct buffer_head *di_bh,
  2117. u32 cpos,
  2118. u32 write_len,
  2119. u32 *cow_start,
  2120. u32 *cow_len)
  2121. {
  2122. int ret = 0;
  2123. struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
  2124. struct ocfs2_extent_list *el = &di->id2.i_list;
  2125. int tree_height = le16_to_cpu(el->l_tree_depth), i;
  2126. struct buffer_head *eb_bh = NULL;
  2127. struct ocfs2_extent_block *eb = NULL;
  2128. struct ocfs2_extent_rec *rec;
  2129. unsigned int want_clusters, rec_end = 0;
  2130. int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb);
  2131. int leaf_clusters;
  2132. if (tree_height > 0) {
  2133. ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
  2134. if (ret) {
  2135. mlog_errno(ret);
  2136. goto out;
  2137. }
  2138. eb = (struct ocfs2_extent_block *) eb_bh->b_data;
  2139. el = &eb->h_list;
  2140. if (el->l_tree_depth) {
  2141. ocfs2_error(inode->i_sb,
  2142. "Inode %lu has non zero tree depth in "
  2143. "leaf block %llu\n", inode->i_ino,
  2144. (unsigned long long)eb_bh->b_blocknr);
  2145. ret = -EROFS;
  2146. goto out;
  2147. }
  2148. }
  2149. *cow_len = 0;
  2150. for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
  2151. rec = &el->l_recs[i];
  2152. if (ocfs2_is_empty_extent(rec)) {
  2153. mlog_bug_on_msg(i != 0, "Inode %lu has empty record in "
  2154. "index %d\n", inode->i_ino, i);
  2155. continue;
  2156. }
  2157. if (le32_to_cpu(rec->e_cpos) +
  2158. le16_to_cpu(rec->e_leaf_clusters) <= cpos)
  2159. continue;
  2160. if (*cow_len == 0) {
  2161. /*
  2162. * We should find a refcounted record in the
  2163. * first pass.
  2164. */
  2165. BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED));
  2166. *cow_start = le32_to_cpu(rec->e_cpos);
  2167. }
  2168. /*
  2169. * If we encounter a hole or a non-refcounted record,
  2170. * stop the search.
  2171. */
  2172. if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) ||
  2173. (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)))
  2174. break;
  2175. leaf_clusters = le16_to_cpu(rec->e_leaf_clusters);
  2176. rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters;
  2177. /*
  2178. * How many clusters do we actually need from
  2179. * this extent? First we see how many we actually
  2180. * need to complete the write. If that's smaller
  2181. * than contig_clusters, we try for contig_clusters.
  2182. */
  2183. if (!*cow_len)
  2184. want_clusters = write_len;
  2185. else
  2186. want_clusters = (cpos + write_len) -
  2187. (*cow_start + *cow_len);
  2188. if (want_clusters < contig_clusters)
  2189. want_clusters = contig_clusters;
  2190. /*
  2191. * If the write does not cover the whole extent, we
  2192. * need to calculate how we're going to split the extent.
  2193. * We try to do it on contig_clusters boundaries.
  2194. *
  2195. * Any extent smaller than contig_clusters will be
  2196. * CoWed in its entirety.
  2197. */
  2198. if (leaf_clusters <= contig_clusters)
  2199. *cow_len += leaf_clusters;
  2200. else if (*cow_len || (*cow_start == cpos)) {
  2201. /*
  2202. * This extent needs to be CoW'd from its
  2203. * beginning, so all we have to do is compute
  2204. * how many clusters to grab. We align
  2205. * want_clusters to the edge of contig_clusters
  2206. * to get better I/O.
  2207. */
  2208. want_clusters = ocfs2_cow_align_length(inode->i_sb,
  2209. want_clusters);
  2210. if (leaf_clusters < want_clusters)
  2211. *cow_len += leaf_clusters;
  2212. else
  2213. *cow_len += want_clusters;
  2214. } else if ((*cow_start + contig_clusters) >=
  2215. (cpos + write_len)) {
  2216. /*
  2217. * Breaking off contig_clusters at the front
  2218. * of the extent will cover our write. That's
  2219. * easy.
  2220. */
  2221. *cow_len = contig_clusters;
  2222. } else if ((rec_end - cpos) <= contig_clusters) {
  2223. /*
  2224. * Breaking off contig_clusters at the tail of
  2225. * this extent will cover cpos.
  2226. */
  2227. *cow_start = rec_end - contig_clusters;
  2228. *cow_len = contig_clusters;
  2229. } else if ((rec_end - cpos) <= want_clusters) {
  2230. /*
  2231. * While we can't fit the entire write in this
  2232. * extent, we know that the write goes from cpos
  2233. * to the end of the extent. Break that off.
  2234. * We try to break it at some multiple of
  2235. * contig_clusters from the front of the extent.
  2236. * Failing that (ie, cpos is within
  2237. * contig_clusters of the front), we'll CoW the
  2238. * entire extent.
  2239. */
  2240. *cow_start = ocfs2_cow_align_start(inode->i_sb,
  2241. *cow_start, cpos);
  2242. *cow_len = rec_end - *cow_start;
  2243. } else {
  2244. /*
  2245. * Ok, the entire write lives in the middle of
  2246. * this extent. Let's try to slice the extent up
  2247. * nicely. Optimally, our CoW region starts at
  2248. * m*contig_clusters from the beginning of the
  2249. * extent and goes for n*contig_clusters,
  2250. * covering the entire write.
  2251. */
  2252. *cow_start = ocfs2_cow_align_start(inode->i_sb,
  2253. *cow_start, cpos);
  2254. want_clusters = (cpos + write_len) - *cow_start;
  2255. want_clusters = ocfs2_cow_align_length(inode->i_sb,
  2256. want_clusters);
  2257. if (*cow_start + want_clusters <= rec_end)
  2258. *cow_len = want_clusters;
  2259. else
  2260. *cow_len = rec_end - *cow_start;
  2261. }
  2262. /* Have we covered our entire write yet? */
  2263. if ((*cow_start + *cow_len) >= (cpos + write_len))
  2264. break;
  2265. /*
  2266. * If we reach the end of the extent block and don't get enough
  2267. * clusters, continue with the next extent block if possible.
  2268. */
  2269. if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
  2270. eb && eb->h_next_leaf_blk) {
  2271. brelse(eb_bh);
  2272. eb_bh = NULL;
  2273. ret = ocfs2_read_extent_block(INODE_CACHE(inode),
  2274. le64_to_cpu(eb->h_next_leaf_blk),
  2275. &eb_bh);
  2276. if (ret) {
  2277. mlog_errno(ret);
  2278. goto out;
  2279. }
  2280. eb = (struct ocfs2_extent_block *) eb_bh->b_data;
  2281. el = &eb->h_list;
  2282. i = -1;
  2283. }
  2284. }
  2285. out:
  2286. brelse(eb_bh);
  2287. return ret;
  2288. }
  2289. /*
  2290. * Prepare meta_ac, data_ac and calculate credits when we want to add some
  2291. * num_clusters in data_tree "et" and change the refcount for the old
2292. * clusters (starting from p_cluster) in the refcount tree.
2293. *
2294. * Note:
2295. * 1. Since we may split the old tree, we will need at most num_clusters + 2
2296. * new leaf records.
2297. * 2. In some cases we may not need to reserve new clusters (e.g. reflink),
2298. * so just pass data_ac = NULL.
  2299. */
  2300. static int ocfs2_lock_refcount_allocators(struct super_block *sb,
  2301. u32 p_cluster, u32 num_clusters,
  2302. struct ocfs2_extent_tree *et,
  2303. struct ocfs2_caching_info *ref_ci,
  2304. struct buffer_head *ref_root_bh,
  2305. struct ocfs2_alloc_context **meta_ac,
  2306. struct ocfs2_alloc_context **data_ac,
  2307. int *credits)
  2308. {
  2309. int ret = 0, meta_add = 0;
  2310. int num_free_extents = ocfs2_num_free_extents(OCFS2_SB(sb), et);
  2311. if (num_free_extents < 0) {
  2312. ret = num_free_extents;
  2313. mlog_errno(ret);
  2314. goto out;
  2315. }
  2316. if (num_free_extents < num_clusters + 2)
  2317. meta_add =
  2318. ocfs2_extend_meta_needed(et->et_root_el);
  2319. *credits += ocfs2_calc_extend_credits(sb, et->et_root_el,
  2320. num_clusters + 2);
  2321. ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
  2322. p_cluster, num_clusters,
  2323. &meta_add, credits);
  2324. if (ret) {
  2325. mlog_errno(ret);
  2326. goto out;
  2327. }
  2328. mlog(0, "reserve new metadata %d, clusters %u, credits = %d\n",
  2329. meta_add, num_clusters, *credits);
  2330. ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
  2331. meta_ac);
  2332. if (ret) {
  2333. mlog_errno(ret);
  2334. goto out;
  2335. }
  2336. if (data_ac) {
  2337. ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
  2338. data_ac);
  2339. if (ret)
  2340. mlog_errno(ret);
  2341. }
  2342. out:
  2343. if (ret) {
  2344. if (*meta_ac) {
  2345. ocfs2_free_alloc_context(*meta_ac);
  2346. *meta_ac = NULL;
  2347. }
  2348. }
  2349. return ret;
  2350. }
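/*
 * Callback for walk_page_buffers(): clear the mapped state of each
 * buffer in a page that is about to be CoWed.
 */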
  2351. static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
  2352. {
  2353. BUG_ON(buffer_dirty(bh));
  2354. clear_buffer_mapped(bh);
  2355. return 0;
  2356. }
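/*
 * Copy the data of the old clusters to the newly allocated ones through
 * the page cache: bring each page up to date, clear its buffer mappings,
 * then map and dirty it against the new physical blocks under the
 * journal handle.
 */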
  2357. static int ocfs2_duplicate_clusters(handle_t *handle,
  2358. struct ocfs2_cow_context *context,
  2359. u32 cpos, u32 old_cluster,
  2360. u32 new_cluster, u32 new_len)
  2361. {
  2362. int ret = 0, partial;
  2363. struct ocfs2_caching_info *ci = context->di_et.et_ci;
  2364. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  2365. u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
  2366. struct page *page;
  2367. pgoff_t page_index;
  2368. unsigned int from, to;
  2369. loff_t offset, end, map_end;
  2370. struct address_space *mapping = context->inode->i_mapping;
  2371. mlog(0, "old_cluster %u, new %u, len %u at offset %u\n", old_cluster,
  2372. new_cluster, new_len, cpos);
  2373. offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
  2374. end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
  2375. while (offset < end) {
  2376. page_index = offset >> PAGE_CACHE_SHIFT;
  2377. map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
  2378. if (map_end > end)
  2379. map_end = end;
  2380. /* from, to is the offset within the page. */
  2381. from = offset & (PAGE_CACHE_SIZE - 1);
  2382. to = PAGE_CACHE_SIZE;
  2383. if (map_end & (PAGE_CACHE_SIZE - 1))
  2384. to = map_end & (PAGE_CACHE_SIZE - 1);
  2385. page = grab_cache_page(mapping, page_index);
  2386. /* This page can't be dirtied before we CoW it out. */
  2387. BUG_ON(PageDirty(page));
  2388. if (!PageUptodate(page)) {
  2389. ret = block_read_full_page(page, ocfs2_get_block);
  2390. if (ret) {
  2391. mlog_errno(ret);
  2392. goto unlock;
  2393. }
  2394. lock_page(page);
  2395. }
  2396. if (page_has_buffers(page)) {
  2397. ret = walk_page_buffers(handle, page_buffers(page),
  2398. from, to, &partial,
  2399. ocfs2_clear_cow_buffer);
  2400. if (ret) {
  2401. mlog_errno(ret);
  2402. goto unlock;
  2403. }
  2404. }
  2405. ocfs2_map_and_dirty_page(context->inode,
  2406. handle, from, to,
  2407. page, 0, &new_block);
  2408. mark_page_accessed(page);
  2409. unlock:
  2410. unlock_page(page);
  2411. page_cache_release(page);
  2412. page = NULL;
  2413. offset = map_end;
  2414. if (ret)
  2415. break;
  2416. }
  2417. return ret;
  2418. }
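/*
 * Rewrite the extent record at cpos so that it points at p_cluster and
 * no longer carries the OCFS2_EXT_REFCOUNTED flag, splitting the
 * original extent if needed.
 */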
  2419. static int ocfs2_clear_ext_refcount(handle_t *handle,
  2420. struct ocfs2_extent_tree *et,
  2421. u32 cpos, u32 p_cluster, u32 len,
  2422. unsigned int ext_flags,
  2423. struct ocfs2_alloc_context *meta_ac,
  2424. struct ocfs2_cached_dealloc_ctxt *dealloc)
  2425. {
  2426. int ret, index;
  2427. struct ocfs2_extent_rec replace_rec;
  2428. struct ocfs2_path *path = NULL;
  2429. struct ocfs2_extent_list *el;
  2430. struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
  2431. u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
  2432. mlog(0, "inode %llu cpos %u, len %u, p_cluster %u, ext_flags %u\n",
  2433. (unsigned long long)ino, cpos, len, p_cluster, ext_flags);
  2434. memset(&replace_rec, 0, sizeof(replace_rec));
  2435. replace_rec.e_cpos = cpu_to_le32(cpos);
  2436. replace_rec.e_leaf_clusters = cpu_to_le16(len);
  2437. replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb,
  2438. p_cluster));
  2439. replace_rec.e_flags = ext_flags;
  2440. replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED;
  2441. path = ocfs2_new_path_from_et(et);
  2442. if (!path) {
  2443. ret = -ENOMEM;
  2444. mlog_errno(ret);
  2445. goto out;
  2446. }
  2447. ret = ocfs2_find_path(et->et_ci, path, cpos);
  2448. if (ret) {
  2449. mlog_errno(ret);
  2450. goto out;
  2451. }
  2452. el = path_leaf_el(path);
  2453. index = ocfs2_search_extent_list(el, cpos);
  2454. if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
  2455. ocfs2_error(sb,
  2456. "Inode %llu has an extent at cpos %u which can no "
  2457. "longer be found.\n",
  2458. (unsigned long long)ino, cpos);
  2459. ret = -EROFS;
  2460. goto out;
  2461. }
  2462. ret = ocfs2_split_extent(handle, et, path, index,
  2463. &replace_rec, meta_ac, dealloc);
  2464. if (ret)
  2465. mlog_errno(ret);
  2466. out:
  2467. ocfs2_free_path(path);
  2468. return ret;
  2469. }
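/*
 * Replace the old clusters with the new ones: duplicate the data unless
 * the old extent is unwritten, then clear the refcounted flag in the
 * data extent tree.
 */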
  2470. static int ocfs2_replace_clusters(handle_t *handle,
  2471. struct ocfs2_cow_context *context,
  2472. u32 cpos, u32 old,
  2473. u32 new, u32 len,
  2474. unsigned int ext_flags)
  2475. {
  2476. int ret;
  2477. struct ocfs2_caching_info *ci = context->di_et.et_ci;
  2478. u64 ino = ocfs2_metadata_cache_owner(ci);
  2479. mlog(0, "inode %llu, cpos %u, old %u, new %u, len %u, ext_flags %u\n",
  2480. (unsigned long long)ino, cpos, old, new, len, ext_flags);
2481. /* If the old clusters are unwritten, there is no need to duplicate them. */
  2482. if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
  2483. ret = ocfs2_duplicate_clusters(handle, context, cpos,
  2484. old, new, len);
  2485. if (ret) {
  2486. mlog_errno(ret);
  2487. goto out;
  2488. }
  2489. }
  2490. ret = ocfs2_clear_ext_refcount(handle, &context->di_et,
  2491. cpos, new, len, ext_flags,
  2492. context->meta_ac, &context->dealloc);
  2493. if (ret)
  2494. mlog_errno(ret);
  2495. out:
  2496. return ret;
  2497. }
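/*
 * In writeback mode, push the CoWed pages to disk before the transaction
 * commits; in ordered mode the journal already guarantees the data hits
 * disk before the commit, so there is nothing to do.
 */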
  2498. static int ocfs2_cow_sync_writeback(struct super_block *sb,
  2499. struct ocfs2_cow_context *context,
  2500. u32 cpos, u32 num_clusters)
  2501. {
  2502. int ret = 0;
  2503. loff_t offset, end, map_end;
  2504. pgoff_t page_index;
  2505. struct page *page;
  2506. if (ocfs2_should_order_data(context->inode))
  2507. return 0;
  2508. offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
  2509. end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);
  2510. ret = filemap_fdatawrite_range(context->inode->i_mapping,
  2511. offset, end - 1);
  2512. if (ret < 0) {
  2513. mlog_errno(ret);
  2514. return ret;
  2515. }
  2516. while (offset < end) {
  2517. page_index = offset >> PAGE_CACHE_SHIFT;
  2518. map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
  2519. if (map_end > end)
  2520. map_end = end;
  2521. page = grab_cache_page(context->inode->i_mapping, page_index);
  2522. BUG_ON(!page);
  2523. wait_on_page_writeback(page);
  2524. if (PageError(page)) {
  2525. ret = -EIO;
  2526. mlog_errno(ret);
  2527. } else
  2528. mark_page_accessed(page);
  2529. unlock_page(page);
  2530. page_cache_release(page);
  2531. page = NULL;
  2532. offset = map_end;
  2533. if (ret)
  2534. break;
  2535. }
  2536. return ret;
  2537. }
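/*
 * CoW one contiguous chunk: reserve allocators and journal credits,
 * claim new clusters and copy the old contents over, then drop the
 * refcount on the old clusters and, in writeback mode, flush the new
 * pages, all within a single transaction.
 */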
  2538. static int ocfs2_make_clusters_writable(struct super_block *sb,
  2539. struct ocfs2_cow_context *context,
  2540. u32 cpos, u32 p_cluster,
  2541. u32 num_clusters, unsigned int e_flags)
  2542. {
  2543. int ret, credits = 0;
  2544. u32 new_bit, new_len;
  2545. struct ocfs2_super *osb = OCFS2_SB(sb);
  2546. handle_t *handle;
  2547. ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
  2548. &context->di_et,
  2549. context->ref_ci,
  2550. context->ref_root_bh,
  2551. &context->meta_ac,
  2552. &context->data_ac, &credits);
  2553. if (ret) {
  2554. mlog_errno(ret);
  2555. return ret;
  2556. }
  2557. handle = ocfs2_start_trans(osb, credits);
  2558. if (IS_ERR(handle)) {
  2559. ret = PTR_ERR(handle);
  2560. mlog_errno(ret);
  2561. goto out;
  2562. }
  2563. while (num_clusters) {
  2564. ret = __ocfs2_claim_clusters(osb, handle, context->data_ac,
  2565. 1, num_clusters,
  2566. &new_bit, &new_len);
  2567. if (ret) {
  2568. mlog_errno(ret);
  2569. goto out_commit;
  2570. }
  2571. ret = ocfs2_replace_clusters(handle, context,
  2572. cpos, p_cluster, new_bit,
  2573. new_len, e_flags);
  2574. if (ret) {
  2575. mlog_errno(ret);
  2576. goto out_commit;
  2577. }
  2578. cpos += new_len;
  2579. p_cluster += new_len;
  2580. num_clusters -= new_len;
  2581. }
  2582. ret = __ocfs2_decrease_refcount(handle, context->ref_ci,
  2583. context->ref_root_bh,
  2584. p_cluster, num_clusters,
  2585. context->meta_ac,
  2586. &context->dealloc);
  2587. if (ret) {
  2588. mlog_errno(ret);
  2589. goto out_commit;
  2590. }
  2591. /*
  2592. * Here we should write the new page out first if we are
  2593. * in write-back mode.
  2594. */
  2595. ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
  2596. if (ret)
  2597. mlog_errno(ret);
  2598. out_commit:
  2599. ocfs2_commit_trans(osb, handle);
  2600. out:
  2601. if (context->data_ac) {
  2602. ocfs2_free_alloc_context(context->data_ac);
  2603. context->data_ac = NULL;
  2604. }
  2605. if (context->meta_ac) {
  2606. ocfs2_free_alloc_context(context->meta_ac);
  2607. context->meta_ac = NULL;
  2608. }
  2609. return ret;
  2610. }
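/*
 * Drive the CoW of [cow_start, cow_start + cow_len): look up each
 * physical extent in that range and make it writable chunk by chunk,
 * then invalidate the cached extent map and run any queued deallocs.
 */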
  2611. static int ocfs2_replace_cow(struct inode *inode,
  2612. struct buffer_head *di_bh,
  2613. struct buffer_head *ref_root_bh,
  2614. struct ocfs2_caching_info *ref_ci,
  2615. u32 cow_start, u32 cow_len)
  2616. {
  2617. int ret = 0;
  2618. u32 p_cluster, num_clusters, start = cow_start;
  2619. unsigned int ext_flags;
  2620. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  2621. struct ocfs2_cow_context *context;
  2622. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
  2623. ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
  2624. "tree, but the feature bit is not set in the "
  2625. "super block.", inode->i_ino);
  2626. return -EROFS;
  2627. }
  2628. context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
  2629. if (!context) {
  2630. ret = -ENOMEM;
  2631. mlog_errno(ret);
  2632. return ret;
  2633. }
  2634. context->inode = inode;
  2635. context->cow_start = cow_start;
  2636. context->cow_len = cow_len;
  2637. context->ref_ci = ref_ci;
  2638. context->ref_root_bh = ref_root_bh;
  2639. ocfs2_init_dealloc_ctxt(&context->dealloc);
  2640. ocfs2_init_dinode_extent_tree(&context->di_et,
  2641. INODE_CACHE(inode), di_bh);
  2642. while (cow_len) {
  2643. ret = ocfs2_get_clusters(inode, cow_start, &p_cluster,
  2644. &num_clusters, &ext_flags);
  2645. if (ret) {
  2646. mlog_errno(ret);
  2647. break;
  2648. }
  2649. BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED));
  2650. if (cow_len < num_clusters)
  2651. num_clusters = cow_len;
  2652. ret = ocfs2_make_clusters_writable(inode->i_sb, context,
  2653. cow_start, p_cluster,
  2654. num_clusters, ext_flags);
  2655. if (ret) {
  2656. mlog_errno(ret);
  2657. break;
  2658. }
  2659. cow_len -= num_clusters;
  2660. cow_start += num_clusters;
  2661. }
  2662. /*
2663. * Truncate the extent map here: no matter whether we hit an error
2664. * during the operation above, we should not trust the cached extent
2665. * map any more.
  2666. */
  2667. ocfs2_extent_map_trunc(inode, start);
  2668. if (ocfs2_dealloc_has_cluster(&context->dealloc)) {
  2669. ocfs2_schedule_truncate_log_flush(osb, 1);
  2670. ocfs2_run_deallocs(osb, &context->dealloc);
  2671. }
  2672. kfree(context);
  2673. return ret;
  2674. }
  2675. /*
  2676. * Starting at cpos, try to CoW write_len clusters.
  2677. * This will stop when it runs into a hole or an unrefcounted extent.
  2678. */
  2679. static int ocfs2_refcount_cow_hunk(struct inode *inode,
  2680. struct buffer_head *di_bh,
  2681. u32 cpos, u32 write_len)
  2682. {
  2683. int ret;
  2684. u32 cow_start = 0, cow_len = 0;
  2685. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  2686. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  2687. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  2688. struct buffer_head *ref_root_bh = NULL;
  2689. struct ocfs2_refcount_tree *ref_tree;
  2690. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  2691. ret = ocfs2_refcount_cal_cow_clusters(inode, di_bh, cpos, write_len,
  2692. &cow_start, &cow_len);
  2693. if (ret) {
  2694. mlog_errno(ret);
  2695. goto out;
  2696. }
  2697. mlog(0, "CoW inode %lu, cpos %u, write_len %u, cow_start %u, "
  2698. "cow_len %u\n", inode->i_ino,
  2699. cpos, write_len, cow_start, cow_len);
  2700. BUG_ON(cow_len == 0);
  2701. ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
  2702. 1, &ref_tree, &ref_root_bh);
  2703. if (ret) {
  2704. mlog_errno(ret);
  2705. goto out;
  2706. }
  2707. ret = ocfs2_replace_cow(inode, di_bh, ref_root_bh, &ref_tree->rf_ci,
  2708. cow_start, cow_len);
  2709. if (ret)
  2710. mlog_errno(ret);
  2711. ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
  2712. brelse(ref_root_bh);
  2713. out:
  2714. return ret;
  2715. }
  2716. /*
  2717. * CoW any and all clusters between cpos and cpos+write_len.
  2718. * If this returns successfully, all clusters between cpos and
  2719. * cpos+write_len are safe to modify.
  2720. */
  2721. int ocfs2_refcount_cow(struct inode *inode,
  2722. struct buffer_head *di_bh,
  2723. u32 cpos, u32 write_len)
  2724. {
  2725. int ret = 0;
  2726. u32 p_cluster, num_clusters;
  2727. unsigned int ext_flags;
  2728. while (write_len) {
  2729. ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
  2730. &num_clusters, &ext_flags);
  2731. if (ret) {
  2732. mlog_errno(ret);
  2733. break;
  2734. }
  2735. if (write_len < num_clusters)
  2736. num_clusters = write_len;
  2737. if (ext_flags & OCFS2_EXT_REFCOUNTED) {
  2738. ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
  2739. num_clusters);
  2740. if (ret) {
  2741. mlog_errno(ret);
  2742. break;
  2743. }
  2744. }
  2745. write_len -= num_clusters;
  2746. cpos += num_clusters;
  2747. }
  2748. return ret;
  2749. }