/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * refcounttree.c
 *
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/sort.h>
#define MLOG_MASK_PREFIX ML_REFCOUNT
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "inode.h"
#include "alloc.h"
#include "suballoc.h"
#include "journal.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "blockcheck.h"
#include "refcounttree.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "aops.h"
#include "xattr.h"

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
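
/*
 * One ocfs2_cow_context drives a single copy-on-write request: the
 * range being copied ([cow_start, cow_start + cow_len) in virtual
 * clusters), the extent and refcount trees involved, the allocators
 * and dealloc context used along the way, plus two hooks
 * (get_clusters/cow_duplicate_clusters) so that different payloads,
 * e.g. file data and xattr values, can share the same COW engine.
 */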
struct ocfs2_cow_context {
	struct inode *inode;
	u32 cow_start;
	u32 cow_len;
	struct ocfs2_extent_tree data_et;
	struct ocfs2_refcount_tree *ref_tree;
	struct buffer_head *ref_root_bh;
	struct ocfs2_alloc_context *meta_ac;
	struct ocfs2_alloc_context *data_ac;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	void *cow_object;
	struct ocfs2_post_refcount *post_refcount;
	int extra_credits;
	int (*get_clusters)(struct ocfs2_cow_context *context,
			    u32 v_cluster, u32 *p_cluster,
			    u32 *num_clusters,
			    unsigned int *extent_flags);
	int (*cow_duplicate_clusters)(handle_t *handle,
				      struct ocfs2_cow_context *context,
				      u32 cpos, u32 old_cluster,
				      u32 new_cluster, u32 new_len);
};

static inline struct ocfs2_refcount_tree *
cache_info_to_refcount(struct ocfs2_caching_info *ci)
{
	return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
}

static int ocfs2_validate_refcount_block(struct super_block *sb,
					 struct buffer_head *bh)
{
	int rc;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)bh->b_data;

	mlog(0, "Validating refcount block %llu\n",
	     (unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running.  We know any error is
	 * local to this block.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
	if (rc) {
		mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
		     (unsigned long long)bh->b_blocknr);
		return rc;
	}

	if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
		ocfs2_error(sb,
			    "Refcount block #%llu has bad signature %.*s",
			    (unsigned long long)bh->b_blocknr, 7,
			    rb->rf_signature);
		return -EINVAL;
	}

	if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
		ocfs2_error(sb,
			    "Refcount block #%llu has an invalid rf_blkno "
			    "of %llu",
			    (unsigned long long)bh->b_blocknr,
			    (unsigned long long)le64_to_cpu(rb->rf_blkno));
		return -EINVAL;
	}

	if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
		ocfs2_error(sb,
			    "Refcount block #%llu has an invalid "
			    "rf_fs_generation of #%u",
			    (unsigned long long)bh->b_blocknr,
			    le32_to_cpu(rb->rf_fs_generation));
		return -EINVAL;
	}

	return 0;
}

static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
				     u64 rb_blkno,
				     struct buffer_head **bh)
{
	int rc;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_block(ci, rb_blkno, &tmp,
			      ocfs2_validate_refcount_block);

	/* If ocfs2_read_block() got us a new bh, pass it up. */
	if (!rc && !*bh)
		*bh = tmp;

	return rc;
}

static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	return rf->rf_blkno;
}

static struct super_block *
ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	return rf->rf_sb;
}

static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	spin_lock(&rf->rf_lock);
}

static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	spin_unlock(&rf->rf_lock);
}

static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	mutex_lock(&rf->rf_io_mutex);
}

static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	mutex_unlock(&rf->rf_io_mutex);
}

static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
	.co_owner		= ocfs2_refcount_cache_owner,
	.co_get_super		= ocfs2_refcount_cache_get_super,
	.co_cache_lock		= ocfs2_refcount_cache_lock,
	.co_cache_unlock	= ocfs2_refcount_cache_unlock,
	.co_io_lock		= ocfs2_refcount_cache_io_lock,
	.co_io_unlock		= ocfs2_refcount_cache_io_unlock,
};
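
/*
 * In-memory refcount trees are cached in an rb-tree on the super
 * block (osb_rf_lock_tree), keyed by root block number.  Lookups
 * and insertions must be done under osb_lock.
 */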
static struct ocfs2_refcount_tree *
ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
{
	struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
	struct ocfs2_refcount_tree *tree = NULL;

	while (n) {
		tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);

		if (blkno < tree->rf_blkno)
			n = n->rb_left;
		else if (blkno > tree->rf_blkno)
			n = n->rb_right;
		else
			return tree;
	}

	return NULL;
}

/* osb_lock is already locked. */
static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
				       struct ocfs2_refcount_tree *new)
{
	u64 rf_blkno = new->rf_blkno;
	struct rb_node *parent = NULL;
	struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
	struct ocfs2_refcount_tree *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct ocfs2_refcount_tree,
			       rf_node);

		if (rf_blkno < tmp->rf_blkno)
			p = &(*p)->rb_left;
		else if (rf_blkno > tmp->rf_blkno)
			p = &(*p)->rb_right;
		else {
			/* This should never happen! */
			mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
			     (unsigned long long)rf_blkno);
			BUG();
		}
	}

	rb_link_node(&new->rf_node, parent, p);
	rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
}

static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
{
	ocfs2_metadata_cache_exit(&tree->rf_ci);
	ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
	ocfs2_lock_res_free(&tree->rf_lockres);
	kfree(tree);
}

static inline void
ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
					struct ocfs2_refcount_tree *tree)
{
	rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
	if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
		osb->osb_ref_tree_lru = NULL;
}

static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
					struct ocfs2_refcount_tree *tree)
{
	spin_lock(&osb->osb_lock);
	ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
	spin_unlock(&osb->osb_lock);
}

void ocfs2_kref_remove_refcount_tree(struct kref *kref)
{
	struct ocfs2_refcount_tree *tree =
		container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);

	ocfs2_free_refcount_tree(tree);
}

static inline void
ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
{
	kref_get(&tree->rf_getcnt);
}

static inline void
ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
{
	kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
}

static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
					       struct super_block *sb)
{
	ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
	mutex_init(&new->rf_io_mutex);
	new->rf_sb = sb;
	spin_lock_init(&new->rf_lock);
}

static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
					struct ocfs2_refcount_tree *new,
					u64 rf_blkno, u32 generation)
{
	init_rwsem(&new->rf_sem);
	ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
				     rf_blkno, generation);
}

static struct ocfs2_refcount_tree*
ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
{
	struct ocfs2_refcount_tree *new;

	new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
	if (!new)
		return NULL;

	new->rf_blkno = rf_blkno;
	kref_init(&new->rf_getcnt);
	ocfs2_init_refcount_tree_ci(new, osb->sb);
	return new;
}
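
/*
 * Find the in-memory tree for rf_blkno, creating and inserting a new
 * one if it isn't cached yet.  osb_ref_tree_lru is a one-entry cache
 * that short-circuits repeated lookups of the most recently used tree.
 */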
static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
				   struct ocfs2_refcount_tree **ret_tree)
{
	int ret = 0;
	struct ocfs2_refcount_tree *tree, *new = NULL;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_block *ref_rb;

	spin_lock(&osb->osb_lock);
	if (osb->osb_ref_tree_lru &&
	    osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
		tree = osb->osb_ref_tree_lru;
	else
		tree = ocfs2_find_refcount_tree(osb, rf_blkno);
	if (tree)
		goto out;

	spin_unlock(&osb->osb_lock);

	new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
	if (!new) {
		ret = -ENOMEM;
		mlog_errno(ret);
		return ret;
	}

	/*
	 * We need the generation to create the refcount tree lock, and
	 * since it isn't changed during tree modification, we are safe
	 * to read it here without protection.
	 *
	 * We also have to purge the cache after we create the lock, since
	 * the refcount block may contain stale data.  It can only be
	 * trusted when we hold the refcount lock.
	 */
	ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		ocfs2_metadata_cache_exit(&new->rf_ci);
		kfree(new);
		return ret;
	}

	ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
	ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
				      new->rf_generation);
	ocfs2_metadata_cache_purge(&new->rf_ci);

	spin_lock(&osb->osb_lock);
	tree = ocfs2_find_refcount_tree(osb, rf_blkno);
	if (tree)
		goto out;

	ocfs2_insert_refcount_tree(osb, new);

	tree = new;
	new = NULL;

out:
	*ret_tree = tree;

	osb->osb_ref_tree_lru = tree;

	spin_unlock(&osb->osb_lock);

	if (new)
		ocfs2_free_refcount_tree(new);

	brelse(ref_root_bh);
	return ret;
}

static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));

	di = (struct ocfs2_dinode *)di_bh->b_data;
	*ref_blkno = le64_to_cpu(di->i_refcount_loc);
	brelse(di_bh);
out:
	return ret;
}

static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
				      struct ocfs2_refcount_tree *tree, int rw)
{
	int ret;

	ret = ocfs2_refcount_lock(tree, rw);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (rw)
		down_write(&tree->rf_sem);
	else
		down_read(&tree->rf_sem);

out:
	return ret;
}

/*
 * Lock the refcount tree pointed to by ref_blkno and return the tree.
 * In most cases, we lock the tree and then read the refcount block,
 * so read it here if the caller really needs it.
 *
 * If the tree has been re-created by another node, free the
 * old one and re-create it.
 */
int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
			     u64 ref_blkno, int rw,
			     struct ocfs2_refcount_tree **ret_tree,
			     struct buffer_head **ref_bh)
{
	int ret, delete_tree = 0;
	struct ocfs2_refcount_tree *tree = NULL;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_block *rb;

again:
	ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	ocfs2_refcount_tree_get(tree);

	ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
	if (ret) {
		mlog_errno(ret);
		ocfs2_refcount_tree_put(tree);
		goto out;
	}

	ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
					&ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		ocfs2_unlock_refcount_tree(osb, tree, rw);
		ocfs2_refcount_tree_put(tree);
		goto out;
	}

	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	/*
	 * If the refcount block has been freed and re-created, we may need
	 * to recreate the refcount tree also.
	 *
	 * Here we just remove the tree from the rb-tree, and the last
	 * kref holder will unlock and delete this refcount_tree.
	 * Then we goto "again" and ocfs2_get_refcount_tree will create
	 * the new refcount tree for us.
	 */
	if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
		if (!tree->rf_removed) {
			ocfs2_erase_refcount_tree_from_list(osb, tree);
			tree->rf_removed = 1;
			delete_tree = 1;
		}

		ocfs2_unlock_refcount_tree(osb, tree, rw);
		/*
		 * We get an extra reference when we create the refcount
		 * tree, so another put will destroy it.
		 */
		if (delete_tree)
			ocfs2_refcount_tree_put(tree);
		brelse(ref_root_bh);
		ref_root_bh = NULL;
		goto again;
	}

	*ret_tree = tree;
	if (ref_bh) {
		*ref_bh = ref_root_bh;
		ref_root_bh = NULL;
	}
out:
	brelse(ref_root_bh);
	return ret;
}
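
/*
 * Same as ocfs2_lock_refcount_tree(), except that the root block
 * number is read from the inode's i_refcount_loc first.
 */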
int ocfs2_lock_refcount_tree_by_inode(struct inode *inode, int rw,
				      struct ocfs2_refcount_tree **ret_tree,
				      struct buffer_head **ref_bh)
{
	int ret;
	u64 ref_blkno;

	ret = ocfs2_get_refcount_block(inode, &ref_blkno);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	return ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno,
					rw, ret_tree, ref_bh);
}

void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
				struct ocfs2_refcount_tree *tree, int rw)
{
	if (rw)
		up_write(&tree->rf_sem);
	else
		up_read(&tree->rf_sem);

	ocfs2_refcount_unlock(tree, rw);
	ocfs2_refcount_tree_put(tree);
}
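
/*
 * Blow away every cached refcount tree.  The caller must guarantee
 * that no tree is still in use, e.g. at unmount time.
 */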
void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
{
	struct rb_node *node;
	struct ocfs2_refcount_tree *tree;
	struct rb_root *root = &osb->osb_rf_lock_tree;

	while ((node = rb_last(root)) != NULL) {
		tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);

		mlog(0, "Purge tree %llu\n",
		     (unsigned long long) tree->rf_blkno);

		rb_erase(&tree->rf_node, root);
		ocfs2_free_refcount_tree(tree);
	}
}

/*
 * Create a refcount tree for an inode.
 * We take for granted that the inode is already locked.
 */
static int ocfs2_create_refcount_tree(struct inode *inode,
				      struct buffer_head *di_bh)
{
	int ret;
	handle_t *handle = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *new_bh = NULL;
	struct ocfs2_refcount_block *rb;
	struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
	u16 suballoc_bit_start;
	u32 num_got;
	u64 first_blkno;

	BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);

	mlog(0, "create tree for inode %lu\n", inode->i_ino);

	ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1,
				   &suballoc_bit_start, &num_got,
				   &first_blkno);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
	if (!new_tree) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out_commit;
	}

	new_bh = sb_getblk(inode->i_sb, first_blkno);
	if (new_bh == NULL) {
		ret = -EIO;
		mlog_errno(ret);
		goto out_commit;
	}
	ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);

	ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* Initialize ocfs2_refcount_block. */
	rb = (struct ocfs2_refcount_block *)new_bh->b_data;
	memset(rb, 0, inode->i_sb->s_blocksize);
	strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
	rb->rf_suballoc_slot = cpu_to_le16(osb->slot_num);
	rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
	rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
	rb->rf_blkno = cpu_to_le64(first_blkno);
	rb->rf_count = cpu_to_le32(1);
	rb->rf_records.rl_count =
			cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
	spin_lock(&osb->osb_lock);
	rb->rf_generation = cpu_to_le32(osb->s_next_generation++);
	spin_unlock(&osb->osb_lock);

	ocfs2_journal_dirty(handle, new_bh);

	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	di->i_refcount_loc = cpu_to_le64(first_blkno);
	spin_unlock(&oi->ip_lock);

	mlog(0, "created tree for inode %lu, refblock %llu\n",
	     inode->i_ino, (unsigned long long)first_blkno);

	ocfs2_journal_dirty(handle, di_bh);

	/*
	 * We have to init the tree lock here since it will use
	 * the generation number to create it.
	 */
	new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
	ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
				      new_tree->rf_generation);

	spin_lock(&osb->osb_lock);
	tree = ocfs2_find_refcount_tree(osb, first_blkno);

	/*
	 * We've just created a new refcount tree in this block.  If
	 * we found a refcount tree on the ocfs2_super, it must be
	 * one we just deleted.  We free the old tree before
	 * inserting the new tree.
	 */
	BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
	if (tree)
		ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
	ocfs2_insert_refcount_tree(osb, new_tree);
	spin_unlock(&osb->osb_lock);
	new_tree = NULL;
	if (tree)
		ocfs2_refcount_tree_put(tree);

out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	if (new_tree) {
		ocfs2_metadata_cache_exit(&new_tree->rf_ci);
		kfree(new_tree);
	}

	brelse(new_bh);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);

	return ret;
}
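
/*
 * Point an inode at an already existing refcount tree rooted at
 * refcount_loc and bump the tree's rf_count, so that the new inode
 * shares the tree with its original owner (as on the reflink path).
 */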
static int ocfs2_set_refcount_tree(struct inode *inode,
				   struct buffer_head *di_bh,
				   u64 refcount_loc)
{
	int ret;
	handle_t *handle = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_block *rb;
	struct ocfs2_refcount_tree *ref_tree;

	BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);

	ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
				       &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	le32_add_cpu(&rb->rf_count, 1);
	ocfs2_journal_dirty(handle, ref_root_bh);

	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	di->i_refcount_loc = cpu_to_le64(refcount_loc);
	spin_unlock(&oi->ip_lock);
	ocfs2_journal_dirty(handle, di_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);

	return ret;
}
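
/*
 * Detach the refcount tree from an inode: clear the refcount flag and
 * i_refcount_loc, drop rf_count, and free the root block back to the
 * allocator if this inode was the tree's last user.
 */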
int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
{
	int ret, delete_tree = 0;
	handle_t *handle = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_refcount_block *rb;
	struct inode *alloc_inode = NULL;
	struct buffer_head *alloc_bh = NULL;
	struct buffer_head *blk_bh = NULL;
	struct ocfs2_refcount_tree *ref_tree;
	int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
	u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
	u16 bit = 0;

	if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL))
		return 0;

	BUG_ON(!ref_blkno);
	ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	rb = (struct ocfs2_refcount_block *)blk_bh->b_data;

	/*
	 * If we are the last user, we need to free the block.
	 * So lock the allocator ahead of time.
	 */
	if (le32_to_cpu(rb->rf_count) == 1) {
		blk = le64_to_cpu(rb->rf_blkno);
		bit = le16_to_cpu(rb->rf_suballoc_bit);
		bg_blkno = ocfs2_which_suballoc_group(blk, bit);

		alloc_inode = ocfs2_get_system_file_inode(osb,
					EXTENT_ALLOC_SYSTEM_INODE,
					le16_to_cpu(rb->rf_suballoc_slot));
		if (!alloc_inode) {
			ret = -ENOMEM;
			mlog_errno(ret);
			goto out;
		}
		mutex_lock(&alloc_inode->i_mutex);

		ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
		if (ret) {
			mlog_errno(ret);
			goto out_mutex;
		}

		credits += OCFS2_SUBALLOC_FREE;
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	di->i_refcount_loc = 0;
	spin_unlock(&oi->ip_lock);
	ocfs2_journal_dirty(handle, di_bh);

	le32_add_cpu(&rb->rf_count, -1);
	ocfs2_journal_dirty(handle, blk_bh);

	if (!rb->rf_count) {
		delete_tree = 1;
		ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
		ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
					       alloc_bh, bit, bg_blkno, 1);
		if (ret)
			mlog_errno(ret);
	}

out_commit:
	ocfs2_commit_trans(osb, handle);
out_unlock:
	if (alloc_inode) {
		ocfs2_inode_unlock(alloc_inode, 1);
		brelse(alloc_bh);
	}
out_mutex:
	if (alloc_inode) {
		mutex_unlock(&alloc_inode->i_mutex);
		iput(alloc_inode);
	}
out:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	if (delete_tree)
		ocfs2_refcount_tree_put(ref_tree);
	brelse(blk_bh);

	return ret;
}
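
/*
 * Scan the record list of a single refcount block for the record
 * covering cpos.  If no record covers it, fake up a record with
 * r_refcount = 0 describing the hole.
 */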
static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
					  struct buffer_head *ref_leaf_bh,
					  u64 cpos, unsigned int len,
					  struct ocfs2_refcount_rec *ret_rec,
					  int *index)
{
	int i = 0;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_rec *rec = NULL;

	for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
		rec = &rb->rf_records.rl_recs[i];

		if (le64_to_cpu(rec->r_cpos) +
		    le32_to_cpu(rec->r_clusters) <= cpos)
			continue;
		else if (le64_to_cpu(rec->r_cpos) > cpos)
			break;

		/* ok, cpos falls in this rec. Just return. */
		if (ret_rec)
			*ret_rec = *rec;
		goto out;
	}

	if (ret_rec) {
		/* We hit a hole here, so fake the rec. */
		ret_rec->r_cpos = cpu_to_le64(cpos);
		ret_rec->r_refcount = 0;
		if (i < le16_to_cpu(rb->rf_records.rl_used) &&
		    le64_to_cpu(rec->r_cpos) < cpos + len)
			ret_rec->r_clusters =
				cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
		else
			ret_rec->r_clusters = cpu_to_le32(len);
	}

out:
	*index = i;
}

/*
 * Given a cpos and len, try to find the refcount record which contains cpos.
 * 1. If cpos can be found in one refcount record, return the record.
 * 2. If cpos can't be found, return a fake record which starts from cpos
 *    and ends at a value between cpos+len and the start of the next record.
 *    This fake record has r_refcount = 0.
 */
static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
				  struct buffer_head *ref_root_bh,
				  u64 cpos, unsigned int len,
				  struct ocfs2_refcount_rec *ret_rec,
				  int *index,
				  struct buffer_head **ret_bh)
{
	int ret = 0, i, found;
	u32 low_cpos;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *tmp, *rec = NULL;
	struct ocfs2_extent_block *eb;
	struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_root_bh->b_data;

	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
		ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
					      ret_rec, index);
		*ret_bh = ref_root_bh;
		get_bh(ref_root_bh);
		return 0;
	}

	el = &rb->rf_list;
	low_cpos = cpos & OCFS2_32BIT_POS_MASK;

	if (el->l_tree_depth) {
		ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		if (el->l_tree_depth) {
			ocfs2_error(sb,
				    "refcount tree %llu has non-zero tree "
				    "depth in leaf btree block %llu\n",
				    (unsigned long long)ocfs2_metadata_cache_owner(ci),
				    (unsigned long long)eb_bh->b_blocknr);
			ret = -EROFS;
			goto out;
		}
	}

	found = 0;
	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
		rec = &el->l_recs[i];

		if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
			found = 1;
			break;
		}
	}

	/* adjust len when we have an ocfs2_extent_rec after it. */
	if (found && i < le16_to_cpu(el->l_next_free_rec) - 1) {
		tmp = &el->l_recs[i+1];

		if (le32_to_cpu(tmp->e_cpos) < cpos + len)
			len = le32_to_cpu(tmp->e_cpos) - cpos;
	}

	ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
					&ref_leaf_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
				      ret_rec, index);
	*ret_bh = ref_leaf_bh;
out:
	brelse(eb_bh);
	return ret;
}
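
/*
 * Two neighbouring records can be merged when they carry the same
 * refcount and cover adjacent cluster ranges.  This enum tells the
 * merge code which neighbour(s) qualify.
 */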
enum ocfs2_ref_rec_contig {
	REF_CONTIG_NONE = 0,
	REF_CONTIG_LEFT,
	REF_CONTIG_RIGHT,
	REF_CONTIG_LEFTRIGHT,
};

static enum ocfs2_ref_rec_contig
	ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
				    int index)
{
	if ((rb->rf_records.rl_recs[index].r_refcount ==
	    rb->rf_records.rl_recs[index + 1].r_refcount) &&
	    (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
	    le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
	    le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
		return REF_CONTIG_RIGHT;

	return REF_CONTIG_NONE;
}

static enum ocfs2_ref_rec_contig
	ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
				  int index)
{
	enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;

	if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
		ret = ocfs2_refcount_rec_adjacent(rb, index);

	if (index > 0) {
		enum ocfs2_ref_rec_contig tmp;

		tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);

		if (tmp == REF_CONTIG_RIGHT) {
			if (ret == REF_CONTIG_RIGHT)
				ret = REF_CONTIG_LEFTRIGHT;
			else
				ret = REF_CONTIG_LEFT;
		}
	}

	return ret;
}

static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
					   int index)
{
	BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
	       rb->rf_records.rl_recs[index+1].r_refcount);

	le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
		     le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));

	if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
		memmove(&rb->rf_records.rl_recs[index + 1],
			&rb->rf_records.rl_recs[index + 2],
			sizeof(struct ocfs2_refcount_rec) *
			(le16_to_cpu(rb->rf_records.rl_used) - index - 2));

	memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
	       0, sizeof(struct ocfs2_refcount_rec));
	le16_add_cpu(&rb->rf_records.rl_used, -1);
}

/*
 * Merge the refcount rec if we are contiguous with the adjacent recs.
 */
static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
				     int index)
{
	enum ocfs2_ref_rec_contig contig =
				ocfs2_refcount_rec_contig(rb, index);

	if (contig == REF_CONTIG_NONE)
		return;

	if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
		BUG_ON(index == 0);
		index--;
	}

	ocfs2_rotate_refcount_rec_left(rb, index);

	if (contig == REF_CONTIG_LEFTRIGHT)
		ocfs2_rotate_refcount_rec_left(rb, index);
}

/*
 * Change the refcount indexed by "index" in ref_leaf_bh.
 * If refcount reaches 0, remove it.
 */
static int ocfs2_change_refcount_rec(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_leaf_bh,
				     int index, int change)
{
	int ret;
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rl = &rb->rf_records;
	struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	mlog(0, "change index %d, old count %u, change %d\n", index,
	     le32_to_cpu(rec->r_refcount), change);
	le32_add_cpu(&rec->r_refcount, change);

	if (!rec->r_refcount) {
		if (index != le16_to_cpu(rl->rl_used) - 1) {
			memmove(rec, rec + 1,
				(le16_to_cpu(rl->rl_used) - index - 1) *
				sizeof(struct ocfs2_refcount_rec));
			memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
			       0, sizeof(struct ocfs2_refcount_rec));
		}

		le16_add_cpu(&rl->rl_used, -1);
	} else
		ocfs2_refcount_rec_merge(rb, index);

	ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
	if (ret)
		mlog_errno(ret);
out:
	return ret;
}
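
/*
 * An inline root keeps its refcount records directly in the root
 * block.  When it fills up, copy the whole block into a freshly
 * allocated leaf and turn the root into a one-level b-tree pointing
 * at that leaf.
 */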
static int ocfs2_expand_inline_ref_root(handle_t *handle,
					struct ocfs2_caching_info *ci,
					struct buffer_head *ref_root_bh,
					struct buffer_head **ref_leaf_bh,
					struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	u16 suballoc_bit_start;
	u32 num_got;
	u64 blkno;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct buffer_head *new_bh = NULL;
	struct ocfs2_refcount_block *new_rb;
	struct ocfs2_refcount_block *root_rb =
			(struct ocfs2_refcount_block *)ref_root_bh->b_data;

	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1,
				   &suballoc_bit_start, &num_got,
				   &blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	new_bh = sb_getblk(sb, blkno);
	if (new_bh == NULL) {
		ret = -EIO;
		mlog_errno(ret);
		goto out;
	}
	ocfs2_set_new_buffer_uptodate(ci, new_bh);

	ret = ocfs2_journal_access_rb(handle, ci, new_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Initialize ocfs2_refcount_block.
	 * It should contain the same information as the old root,
	 * so just memcpy it and change the corresponding fields.
	 */
	memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);
	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
	new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
	new_rb->rf_blkno = cpu_to_le64(blkno);
	new_rb->rf_cpos = cpu_to_le32(0);
	new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
	new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
	ocfs2_journal_dirty(handle, new_bh);

	/* Now change the root. */
	memset(&root_rb->rf_list, 0, sb->s_blocksize -
	       offsetof(struct ocfs2_refcount_block, rf_list));
	root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
	root_rb->rf_clusters = cpu_to_le32(1);
	root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
	root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
	root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
	root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);

	ocfs2_journal_dirty(handle, ref_root_bh);

	mlog(0, "new leaf block %llu, used %u\n", (unsigned long long)blkno,
	     le16_to_cpu(new_rb->rf_records.rl_used));

	*ref_leaf_bh = new_bh;
	new_bh = NULL;
out:
	brelse(new_bh);
	return ret;
}
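
/*
 * sort() helpers for refcount records.  Leaf splitting sorts first by
 * the low 32 bits of r_cpos, to pick a split point that can serve as
 * an e_cpos, and then re-sorts each half by the full 64-bit r_cpos.
 */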
static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
					   struct ocfs2_refcount_rec *next)
{
	if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
		ocfs2_get_ref_rec_low_cpos(next))
		return 1;

	return 0;
}

static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
{
	const struct ocfs2_refcount_rec *l = a, *r = b;
	u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
	u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);

	if (l_cpos > r_cpos)
		return 1;
	if (l_cpos < r_cpos)
		return -1;
	return 0;
}

static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
{
	const struct ocfs2_refcount_rec *l = a, *r = b;
	u64 l_cpos = le64_to_cpu(l->r_cpos);
	u64 r_cpos = le64_to_cpu(r->r_cpos);

	if (l_cpos > r_cpos)
		return 1;
	if (l_cpos < r_cpos)
		return -1;
	return 0;
}

static void swap_refcount_rec(void *a, void *b, int size)
{
	struct ocfs2_refcount_rec *l = a, *r = b, tmp;

	tmp = *(struct ocfs2_refcount_rec *)l;
	*(struct ocfs2_refcount_rec *)l =
			*(struct ocfs2_refcount_rec *)r;
	*(struct ocfs2_refcount_rec *)r = tmp;
}

/*
 * Refcount records are ordered by their 64-bit cpos, but we use only
 * the low 32 bits as the e_cpos in the b-tree.  So we need to make
 * sure that the split position isn't intersected by any record.
 *
 * Note: the record list is already sorted by low 32-bit cpos, so just
 * try the middle position first and walk outwards until we find a
 * good split point.
 */
static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
					 u32 *split_pos, int *split_index)
{
	int num_used = le16_to_cpu(rl->rl_used);
	int delta, middle = num_used / 2;

	for (delta = 0; delta < middle; delta++) {
		/* Let's check delta earlier than middle */
		if (ocfs2_refcount_rec_no_intersect(
					&rl->rl_recs[middle - delta - 1],
					&rl->rl_recs[middle - delta])) {
			*split_index = middle - delta;
			break;
		}

		/* For even counts, don't walk off the end */
		if ((middle + delta + 1) == num_used)
			continue;

		/* Now try delta past middle */
		if (ocfs2_refcount_rec_no_intersect(
					&rl->rl_recs[middle + delta],
					&rl->rl_recs[middle + delta + 1])) {
			*split_index = middle + delta + 1;
			break;
		}
	}

	if (delta >= middle)
		return -ENOSPC;

	*split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
	return 0;
}
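
/*
 * Move roughly the upper half of the records in ref_leaf_bh over to
 * new_bh, returning the chosen split cpos so that the caller can
 * insert the new leaf into the refcount b-tree at that offset.
 */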
  1120. static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
  1121. struct buffer_head *new_bh,
  1122. u32 *split_cpos)
  1123. {
  1124. int split_index = 0, num_moved, ret;
  1125. u32 cpos = 0;
  1126. struct ocfs2_refcount_block *rb =
  1127. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1128. struct ocfs2_refcount_list *rl = &rb->rf_records;
  1129. struct ocfs2_refcount_block *new_rb =
  1130. (struct ocfs2_refcount_block *)new_bh->b_data;
  1131. struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;
  1132. mlog(0, "split old leaf refcount block %llu, count = %u, used = %u\n",
  1133. (unsigned long long)ref_leaf_bh->b_blocknr,
  1134. le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used));
  1135. /*
  1136. * XXX: Improvement later.
  1137. * If we know all the high 32 bit cpos is the same, no need to sort.
  1138. *
  1139. * In order to make the whole process safe, we do:
  1140. * 1. sort the entries by their low 32 bit cpos first so that we can
  1141. * find the split cpos easily.
  1142. * 2. call ocfs2_insert_extent to insert the new refcount block.
  1143. * 3. move the refcount rec to the new block.
  1144. * 4. sort the entries by their 64 bit cpos.
  1145. * 5. dirty the new_rb and rb.
  1146. */
  1147. sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
  1148. sizeof(struct ocfs2_refcount_rec),
  1149. cmp_refcount_rec_by_low_cpos, swap_refcount_rec);
  1150. ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
  1151. if (ret) {
  1152. mlog_errno(ret);
  1153. return ret;
  1154. }
  1155. new_rb->rf_cpos = cpu_to_le32(cpos);
  1156. /* move refcount records starting from split_index to the new block. */
  1157. num_moved = le16_to_cpu(rl->rl_used) - split_index;
  1158. memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
  1159. num_moved * sizeof(struct ocfs2_refcount_rec));
  1160. /*ok, remove the entries we just moved over to the other block. */
  1161. memset(&rl->rl_recs[split_index], 0,
  1162. num_moved * sizeof(struct ocfs2_refcount_rec));
  1163. /* change old and new rl_used accordingly. */
  1164. le16_add_cpu(&rl->rl_used, -num_moved);
  1165. new_rl->rl_used = cpu_to_le32(num_moved);
  1166. sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
  1167. sizeof(struct ocfs2_refcount_rec),
  1168. cmp_refcount_rec_by_cpos, swap_refcount_rec);
  1169. sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
  1170. sizeof(struct ocfs2_refcount_rec),
  1171. cmp_refcount_rec_by_cpos, swap_refcount_rec);
  1172. *split_cpos = cpos;
  1173. return 0;
  1174. }
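/*
 * Allocate a fresh refcount block from meta_ac, initialize it as a leaf,
 * move roughly half of the records in ref_leaf_bh into it and insert it
 * into the refcount b-tree rooted at ref_root_bh at the split cpos
 * returned by ocfs2_divide_leaf_refcount_block().
 */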
  1175. static int ocfs2_new_leaf_refcount_block(handle_t *handle,
  1176. struct ocfs2_caching_info *ci,
  1177. struct buffer_head *ref_root_bh,
  1178. struct buffer_head *ref_leaf_bh,
  1179. struct ocfs2_alloc_context *meta_ac)
  1180. {
  1181. int ret;
  1182. u16 suballoc_bit_start;
  1183. u32 num_got, new_cpos;
  1184. u64 blkno;
  1185. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1186. struct ocfs2_refcount_block *root_rb =
  1187. (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1188. struct buffer_head *new_bh = NULL;
  1189. struct ocfs2_refcount_block *new_rb;
  1190. struct ocfs2_extent_tree ref_et;
  1191. BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));
  1192. ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
  1193. OCFS2_JOURNAL_ACCESS_WRITE);
  1194. if (ret) {
  1195. mlog_errno(ret);
  1196. goto out;
  1197. }
  1198. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1199. OCFS2_JOURNAL_ACCESS_WRITE);
  1200. if (ret) {
  1201. mlog_errno(ret);
  1202. goto out;
  1203. }
  1204. ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1,
  1205. &suballoc_bit_start, &num_got,
  1206. &blkno);
  1207. if (ret) {
  1208. mlog_errno(ret);
  1209. goto out;
  1210. }
  1211. new_bh = sb_getblk(sb, blkno);
  1212. if (new_bh == NULL) {
  1213. ret = -EIO;
  1214. mlog_errno(ret);
  1215. goto out;
  1216. }
  1217. ocfs2_set_new_buffer_uptodate(ci, new_bh);
  1218. ret = ocfs2_journal_access_rb(handle, ci, new_bh,
  1219. OCFS2_JOURNAL_ACCESS_CREATE);
  1220. if (ret) {
  1221. mlog_errno(ret);
  1222. goto out;
  1223. }
  1224. /* Initialize ocfs2_refcount_block. */
  1225. new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
  1226. memset(new_rb, 0, sb->s_blocksize);
  1227. strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
  1228. new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
  1229. new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
  1230. new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
  1231. new_rb->rf_blkno = cpu_to_le64(blkno);
  1232. new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
  1233. new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
  1234. new_rb->rf_records.rl_count =
  1235. cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
  1236. new_rb->rf_generation = root_rb->rf_generation;
  1237. ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
  1238. if (ret) {
  1239. mlog_errno(ret);
  1240. goto out;
  1241. }
  1242. ocfs2_journal_dirty(handle, ref_leaf_bh);
  1243. ocfs2_journal_dirty(handle, new_bh);
  1244. ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);
  1245. mlog(0, "insert new leaf block %llu at %u\n",
  1246. (unsigned long long)new_bh->b_blocknr, new_cpos);
  1247. /* Insert the new leaf block with the specific offset cpos. */
  1248. ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
  1249. 1, 0, meta_ac);
  1250. if (ret)
  1251. mlog_errno(ret);
  1252. out:
  1253. brelse(new_bh);
  1254. return ret;
  1255. }
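/*
 * Make room for more refcount records. If the root is still an inline
 * record block, convert it to a b-tree root with one leaf first; then
 * grow the tree by adding another leaf block and splitting the records.
 */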
  1256. static int ocfs2_expand_refcount_tree(handle_t *handle,
  1257. struct ocfs2_caching_info *ci,
  1258. struct buffer_head *ref_root_bh,
  1259. struct buffer_head *ref_leaf_bh,
  1260. struct ocfs2_alloc_context *meta_ac)
  1261. {
  1262. int ret;
  1263. struct buffer_head *expand_bh = NULL;
  1264. if (ref_root_bh == ref_leaf_bh) {
  1265. /*
  1266. * the old root bh hasn't been expanded to a b-tree,
  1267. * so expand it first.
  1268. */
  1269. ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
  1270. &expand_bh, meta_ac);
  1271. if (ret) {
  1272. mlog_errno(ret);
  1273. goto out;
  1274. }
  1275. } else {
  1276. expand_bh = ref_leaf_bh;
  1277. get_bh(expand_bh);
  1278. }
  1279. /* Now add a new refcount block into the tree. */
  1280. ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
  1281. expand_bh, meta_ac);
  1282. if (ret)
  1283. mlog_errno(ret);
  1284. out:
  1285. brelse(expand_bh);
  1286. return ret;
  1287. }
  1288. /*
  1289. * Adjust the extent rec in b-tree representing ref_leaf_bh.
  1290. *
  1291. * Only called when we have inserted a new refcount rec at index 0
  1292. * which means ocfs2_extent_rec.e_cpos may need some change.
  1293. */
  1294. static int ocfs2_adjust_refcount_rec(handle_t *handle,
  1295. struct ocfs2_caching_info *ci,
  1296. struct buffer_head *ref_root_bh,
  1297. struct buffer_head *ref_leaf_bh,
  1298. struct ocfs2_refcount_rec *rec)
  1299. {
  1300. int ret = 0, i;
  1301. u32 new_cpos, old_cpos;
  1302. struct ocfs2_path *path = NULL;
  1303. struct ocfs2_extent_tree et;
  1304. struct ocfs2_refcount_block *rb =
  1305. (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1306. struct ocfs2_extent_list *el;
  1307. if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
  1308. goto out;
  1309. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1310. old_cpos = le32_to_cpu(rb->rf_cpos);
  1311. new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
  1312. if (old_cpos <= new_cpos)
  1313. goto out;
  1314. ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
  1315. path = ocfs2_new_path_from_et(&et);
  1316. if (!path) {
  1317. ret = -ENOMEM;
  1318. mlog_errno(ret);
  1319. goto out;
  1320. }
  1321. ret = ocfs2_find_path(ci, path, old_cpos);
  1322. if (ret) {
  1323. mlog_errno(ret);
  1324. goto out;
  1325. }
  1326. /*
  1327. * 2 more credits, one for the leaf refcount block, one for
  1328. * the extent block that contains the extent rec.
  1329. */
  1330. ret = ocfs2_extend_trans(handle, handle->h_buffer_credits + 2);
  1331. if (ret < 0) {
  1332. mlog_errno(ret);
  1333. goto out;
  1334. }
  1335. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1336. OCFS2_JOURNAL_ACCESS_WRITE);
  1337. if (ret < 0) {
  1338. mlog_errno(ret);
  1339. goto out;
  1340. }
  1341. ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
  1342. OCFS2_JOURNAL_ACCESS_WRITE);
  1343. if (ret < 0) {
  1344. mlog_errno(ret);
  1345. goto out;
  1346. }
  1347. /* change the leaf extent block first. */
  1348. el = path_leaf_el(path);
  1349. for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
  1350. if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
  1351. break;
  1352. BUG_ON(i == le16_to_cpu(el->l_next_free_rec));
  1353. el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
  1354. /* change the r_cpos in the leaf block. */
  1355. rb->rf_cpos = cpu_to_le32(new_cpos);
  1356. ocfs2_journal_dirty(handle, path_leaf_bh(path));
  1357. ocfs2_journal_dirty(handle, ref_leaf_bh);
  1358. out:
  1359. ocfs2_free_path(path);
  1360. return ret;
  1361. }
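/*
 * Insert rec into ref_leaf_bh at the given index, expanding the tree
 * first if the leaf is already full. After the insert, neighbouring
 * records are merged where possible, and if the new record landed at
 * index 0 the extent record's e_cpos may need to be pulled back via
 * ocfs2_adjust_refcount_rec().
 */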
  1362. static int ocfs2_insert_refcount_rec(handle_t *handle,
  1363. struct ocfs2_caching_info *ci,
  1364. struct buffer_head *ref_root_bh,
  1365. struct buffer_head *ref_leaf_bh,
  1366. struct ocfs2_refcount_rec *rec,
  1367. int index,
  1368. struct ocfs2_alloc_context *meta_ac)
  1369. {
  1370. int ret;
  1371. struct ocfs2_refcount_block *rb =
  1372. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1373. struct ocfs2_refcount_list *rf_list = &rb->rf_records;
  1374. struct buffer_head *new_bh = NULL;
  1375. BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
  1376. if (rf_list->rl_used == rf_list->rl_count) {
  1377. u64 cpos = le64_to_cpu(rec->r_cpos);
  1378. u32 len = le32_to_cpu(rec->r_clusters);
  1379. ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
  1380. ref_leaf_bh, meta_ac);
  1381. if (ret) {
  1382. mlog_errno(ret);
  1383. goto out;
  1384. }
  1385. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1386. cpos, len, NULL, &index,
  1387. &new_bh);
  1388. if (ret) {
  1389. mlog_errno(ret);
  1390. goto out;
  1391. }
  1392. ref_leaf_bh = new_bh;
  1393. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1394. rf_list = &rb->rf_records;
  1395. }
  1396. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1397. OCFS2_JOURNAL_ACCESS_WRITE);
  1398. if (ret) {
  1399. mlog_errno(ret);
  1400. goto out;
  1401. }
  1402. if (index < le16_to_cpu(rf_list->rl_used))
  1403. memmove(&rf_list->rl_recs[index + 1],
  1404. &rf_list->rl_recs[index],
  1405. (le16_to_cpu(rf_list->rl_used) - index) *
  1406. sizeof(struct ocfs2_refcount_rec));
  1407. mlog(0, "insert refcount record start %llu, len %u, count %u "
  1408. "to leaf block %llu at index %d\n",
  1409. (unsigned long long)le64_to_cpu(rec->r_cpos),
  1410. le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount),
  1411. (unsigned long long)ref_leaf_bh->b_blocknr, index);
  1412. rf_list->rl_recs[index] = *rec;
  1413. le16_add_cpu(&rf_list->rl_used, 1);
  1414. ocfs2_refcount_rec_merge(rb, index);
  1415. ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
  1416. if (ret) {
  1417. mlog_errno(ret);
  1418. goto out;
  1419. }
  1420. if (index == 0) {
  1421. ret = ocfs2_adjust_refcount_rec(handle, ci,
  1422. ref_root_bh,
  1423. ref_leaf_bh, rec);
  1424. if (ret)
  1425. mlog_errno(ret);
  1426. }
  1427. out:
  1428. brelse(new_bh);
  1429. return ret;
  1430. }
  1431. /*
  1432. * Split the refcount_rec indexed by "index" in ref_leaf_bh.
  1433. * This is much simpler than our b-tree code.
  1434. * split_rec is the new refcount rec we want to insert.
  1435. * If split_rec->r_refcount > 0, we are changing the refcount (in case we
  1436. * increase refcount or decrease a refcount to non-zero).
  1437. * If split_rec->r_refcount == 0, we are punching a hole in the current
  1438. * refcount rec (in case we decrease a refcount to zero).
  1439. */
  1440. static int ocfs2_split_refcount_rec(handle_t *handle,
  1441. struct ocfs2_caching_info *ci,
  1442. struct buffer_head *ref_root_bh,
  1443. struct buffer_head *ref_leaf_bh,
  1444. struct ocfs2_refcount_rec *split_rec,
  1445. int index,
  1446. struct ocfs2_alloc_context *meta_ac,
  1447. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1448. {
  1449. int ret, recs_need;
  1450. u32 len;
  1451. struct ocfs2_refcount_block *rb =
  1452. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1453. struct ocfs2_refcount_list *rf_list = &rb->rf_records;
  1454. struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
  1455. struct ocfs2_refcount_rec *tail_rec = NULL;
  1456. struct buffer_head *new_bh = NULL;
  1457. BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
  1458. mlog(0, "original r_pos %llu, cluster %u, split %llu, cluster %u\n",
  1459. le64_to_cpu(orig_rec->r_cpos), le32_to_cpu(orig_rec->r_clusters),
  1460. le64_to_cpu(split_rec->r_cpos),
  1461. le32_to_cpu(split_rec->r_clusters));
  1462. /*
  1463. * If we just need to split the header or tail clusters,
  1464. * no more recs are needed, just split is OK.
  1465. * Otherwise we need at least one new rec.
  1466. */
  1467. if (!split_rec->r_refcount &&
  1468. (split_rec->r_cpos == orig_rec->r_cpos ||
  1469. le64_to_cpu(split_rec->r_cpos) +
  1470. le32_to_cpu(split_rec->r_clusters) ==
  1471. le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
  1472. recs_need = 0;
  1473. else
  1474. recs_need = 1;
  1475. /*
  1476. * We need one more rec if we split in the middle and the new rec has
  1477. * some refcount in it.
  1478. */
  1479. if (split_rec->r_refcount &&
  1480. (split_rec->r_cpos != orig_rec->r_cpos &&
  1481. le64_to_cpu(split_rec->r_cpos) +
  1482. le32_to_cpu(split_rec->r_clusters) !=
  1483. le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
  1484. recs_need++;
  1485. /* If the leaf block doesn't have enough records, expand it. */
  1486. if (le16_to_cpu(rf_list->rl_used) + recs_need > le16_to_cpu(rf_list->rl_count)) {
  1487. struct ocfs2_refcount_rec tmp_rec;
  1488. u64 cpos = le64_to_cpu(orig_rec->r_cpos);
  1489. len = le32_to_cpu(orig_rec->r_clusters);
  1490. ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
  1491. ref_leaf_bh, meta_ac);
  1492. if (ret) {
  1493. mlog_errno(ret);
  1494. goto out;
  1495. }
  1496. /*
  1497. * We have to re-get it since now cpos may be moved to
  1498. * another leaf block.
  1499. */
  1500. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1501. cpos, len, &tmp_rec, &index,
  1502. &new_bh);
  1503. if (ret) {
  1504. mlog_errno(ret);
  1505. goto out;
  1506. }
  1507. ref_leaf_bh = new_bh;
  1508. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1509. rf_list = &rb->rf_records;
  1510. orig_rec = &rf_list->rl_recs[index];
  1511. }
  1512. ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
  1513. OCFS2_JOURNAL_ACCESS_WRITE);
  1514. if (ret) {
  1515. mlog_errno(ret);
  1516. goto out;
  1517. }
  1518. /*
  1519. * We have calculated how many new records we need and stored
  1520. * the number in recs_need, so make enough room first by moving the records
  1521. * after "index" to the end.
  1522. */
  1523. if (index != le16_to_cpu(rf_list->rl_used) - 1)
  1524. memmove(&rf_list->rl_recs[index + 1 + recs_need],
  1525. &rf_list->rl_recs[index + 1],
  1526. (le16_to_cpu(rf_list->rl_used) - index - 1) *
  1527. sizeof(struct ocfs2_refcount_rec));
  1528. len = (le64_to_cpu(orig_rec->r_cpos) +
  1529. le32_to_cpu(orig_rec->r_clusters)) -
  1530. (le64_to_cpu(split_rec->r_cpos) +
  1531. le32_to_cpu(split_rec->r_clusters));
  1532. /*
  1533. * If we have "len", then we will split the tail and move it
  1534. * to the end of the space we have just spared.
  1535. */
  1536. if (len) {
  1537. tail_rec = &rf_list->rl_recs[index + recs_need];
  1538. memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
  1539. le64_add_cpu(&tail_rec->r_cpos,
  1540. le32_to_cpu(tail_rec->r_clusters) - len);
  1541. tail_rec->r_clusters = cpu_to_le32(len);
  1542. }
  1543. /*
  1544. * If the split pos isn't the same as the original one, we need to
  1545. * split in the head.
  1546. *
  1547. * Note: it can happen that split_rec.r_refcount = 0, recs_need = 0
  1548. * and len > 0, which means we just cut the head from the orig_rec.
  1549. * In that case orig_rec has already been modified above, so the
  1550. * r_cpos check alone is not reliable; hence the tail_rec check too.
  1551. */
  1552. if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
  1553. len = le64_to_cpu(split_rec->r_cpos) -
  1554. le64_to_cpu(orig_rec->r_cpos);
  1555. orig_rec->r_clusters = cpu_to_le32(len);
  1556. index++;
  1557. }
  1558. le16_add_cpu(&rf_list->rl_used, recs_need);
  1559. if (split_rec->r_refcount) {
  1560. rf_list->rl_recs[index] = *split_rec;
  1561. mlog(0, "insert refcount record start %llu, len %u, count %u "
  1562. "to leaf block %llu at index %d\n",
  1563. (unsigned long long)le64_to_cpu(split_rec->r_cpos),
  1564. le32_to_cpu(split_rec->r_clusters),
  1565. le32_to_cpu(split_rec->r_refcount),
  1566. (unsigned long long)ref_leaf_bh->b_blocknr, index);
  1567. ocfs2_refcount_rec_merge(rb, index);
  1568. }
  1569. ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
  1570. if (ret)
  1571. mlog_errno(ret);
  1572. out:
  1573. brelse(new_bh);
  1574. return ret;
  1575. }
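/*
 * Walk [cpos, cpos + len) and raise the refcount by one, record by
 * record. A purely illustrative example: increasing [10, 18) against an
 * existing rec {r_cpos = 12, r_clusters = 4, r_refcount = 1} first
 * inserts a new rec for the hole [10, 12), then bumps [12, 16) to a
 * refcount of 2 via a split/change, then continues with [16, 18).
 */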
  1576. static int __ocfs2_increase_refcount(handle_t *handle,
  1577. struct ocfs2_caching_info *ci,
  1578. struct buffer_head *ref_root_bh,
  1579. u64 cpos, u32 len,
  1580. struct ocfs2_alloc_context *meta_ac,
  1581. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1582. {
  1583. int ret = 0, index;
  1584. struct buffer_head *ref_leaf_bh = NULL;
  1585. struct ocfs2_refcount_rec rec;
  1586. unsigned int set_len = 0;
  1587. mlog(0, "Tree owner %llu, add refcount start %llu, len %u\n",
  1588. (unsigned long long)ocfs2_metadata_cache_owner(ci),
  1589. (unsigned long long)cpos, len);
  1590. while (len) {
  1591. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1592. cpos, len, &rec, &index,
  1593. &ref_leaf_bh);
  1594. if (ret) {
  1595. mlog_errno(ret);
  1596. goto out;
  1597. }
  1598. set_len = le32_to_cpu(rec.r_clusters);
  1599. /*
  1600. * Here we may meet with 3 situations:
  1601. *
  1602. * 1. If we find an already existing record, and the length
  1603. * is the same, cool, we just need to increase the r_refcount
  1604. * and it is OK.
  1605. * 2. If we find a hole, just insert it with r_refcount = 1.
  1606. * 3. If we are in the middle of one extent record, split
  1607. * it.
  1608. */
  1609. if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
  1610. set_len <= len) {
  1611. mlog(0, "increase refcount rec, start %llu, len %u, "
  1612. "count %u\n", (unsigned long long)cpos, set_len,
  1613. le32_to_cpu(rec.r_refcount));
  1614. ret = ocfs2_change_refcount_rec(handle, ci,
  1615. ref_leaf_bh, index, 1);
  1616. if (ret) {
  1617. mlog_errno(ret);
  1618. goto out;
  1619. }
  1620. } else if (!rec.r_refcount) {
  1621. rec.r_refcount = cpu_to_le32(1);
  1622. mlog(0, "insert refcount rec, start %llu, len %u\n",
  1623. (unsigned long long)le64_to_cpu(rec.r_cpos),
  1624. set_len);
  1625. ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
  1626. ref_leaf_bh,
  1627. &rec, index, meta_ac);
  1628. if (ret) {
  1629. mlog_errno(ret);
  1630. goto out;
  1631. }
  1632. } else {
  1633. set_len = min((u64)(cpos + len),
  1634. le64_to_cpu(rec.r_cpos) + set_len) - cpos;
  1635. rec.r_cpos = cpu_to_le64(cpos);
  1636. rec.r_clusters = cpu_to_le32(set_len);
  1637. le32_add_cpu(&rec.r_refcount, 1);
  1638. mlog(0, "split refcount rec, start %llu, "
  1639. "len %u, count %u\n",
  1640. (unsigned long long)le64_to_cpu(rec.r_cpos),
  1641. set_len, le32_to_cpu(rec.r_refcount));
  1642. ret = ocfs2_split_refcount_rec(handle, ci,
  1643. ref_root_bh, ref_leaf_bh,
  1644. &rec, index,
  1645. meta_ac, dealloc);
  1646. if (ret) {
  1647. mlog_errno(ret);
  1648. goto out;
  1649. }
  1650. }
  1651. cpos += set_len;
  1652. len -= set_len;
  1653. brelse(ref_leaf_bh);
  1654. ref_leaf_bh = NULL;
  1655. }
  1656. out:
  1657. brelse(ref_leaf_bh);
  1658. return ret;
  1659. }
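/*
 * ref_leaf_bh has run out of records, so take it out of the refcount
 * b-tree: remove its extent record, drop it from the metadata cache and
 * queue the block itself for deallocation. If this was the last leaf,
 * collapse the root back into a plain record block.
 */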
  1660. static int ocfs2_remove_refcount_extent(handle_t *handle,
  1661. struct ocfs2_caching_info *ci,
  1662. struct buffer_head *ref_root_bh,
  1663. struct buffer_head *ref_leaf_bh,
  1664. struct ocfs2_alloc_context *meta_ac,
  1665. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1666. {
  1667. int ret;
  1668. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1669. struct ocfs2_refcount_block *rb =
  1670. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1671. struct ocfs2_extent_tree et;
  1672. BUG_ON(rb->rf_records.rl_used);
  1673. ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
  1674. ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
  1675. 1, meta_ac, dealloc);
  1676. if (ret) {
  1677. mlog_errno(ret);
  1678. goto out;
  1679. }
  1680. ocfs2_remove_from_cache(ci, ref_leaf_bh);
  1681. /*
  1682. * add the freed block to the dealloc so that it will be freed
  1683. * when we run dealloc.
  1684. */
  1685. ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
  1686. le16_to_cpu(rb->rf_suballoc_slot),
  1687. le64_to_cpu(rb->rf_blkno),
  1688. le16_to_cpu(rb->rf_suballoc_bit));
  1689. if (ret) {
  1690. mlog_errno(ret);
  1691. goto out;
  1692. }
  1693. ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
  1694. OCFS2_JOURNAL_ACCESS_WRITE);
  1695. if (ret) {
  1696. mlog_errno(ret);
  1697. goto out;
  1698. }
  1699. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  1700. le32_add_cpu(&rb->rf_clusters, -1);
  1701. /*
  1702. * check whether we need to restore the root refcount block if
  1703. * there is no leaf extent block at all.
  1704. */
  1705. if (!rb->rf_list.l_next_free_rec) {
  1706. BUG_ON(rb->rf_clusters);
  1707. mlog(0, "reset refcount tree root %llu to be a record block.\n",
  1708. (unsigned long long)ref_root_bh->b_blocknr);
  1709. rb->rf_flags = 0;
  1710. rb->rf_parent = 0;
  1711. rb->rf_cpos = 0;
  1712. memset(&rb->rf_records, 0, sb->s_blocksize -
  1713. offsetof(struct ocfs2_refcount_block, rf_records));
  1714. rb->rf_records.rl_count =
  1715. cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
  1716. }
  1717. ocfs2_journal_dirty(handle, ref_root_bh);
  1718. out:
  1719. return ret;
  1720. }
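/*
 * Decrease the refcount of [cpos, cpos + len), which must lie inside
 * the record at the given index. A full-record decrease is done in
 * place; a partial one is done by splitting the record. Empty leaf
 * blocks are removed from the tree afterwards.
 */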
  1721. static int ocfs2_decrease_refcount_rec(handle_t *handle,
  1722. struct ocfs2_caching_info *ci,
  1723. struct buffer_head *ref_root_bh,
  1724. struct buffer_head *ref_leaf_bh,
  1725. int index, u64 cpos, unsigned int len,
  1726. struct ocfs2_alloc_context *meta_ac,
  1727. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1728. {
  1729. int ret;
  1730. struct ocfs2_refcount_block *rb =
  1731. (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1732. struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];
  1733. BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
  1734. BUG_ON(cpos + len >
  1735. le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));
  1736. if (cpos == le64_to_cpu(rec->r_cpos) &&
  1737. len == le32_to_cpu(rec->r_clusters))
  1738. ret = ocfs2_change_refcount_rec(handle, ci,
  1739. ref_leaf_bh, index, -1);
  1740. else {
  1741. struct ocfs2_refcount_rec split = *rec;
  1742. split.r_cpos = cpu_to_le64(cpos);
  1743. split.r_clusters = cpu_to_le32(len);
  1744. le32_add_cpu(&split.r_refcount, -1);
  1745. mlog(0, "split refcount rec, start %llu, "
  1746. "len %u, count %u, original start %llu, len %u\n",
  1747. (unsigned long long)le64_to_cpu(split.r_cpos),
  1748. len, le32_to_cpu(split.r_refcount),
  1749. (unsigned long long)le64_to_cpu(rec->r_cpos),
  1750. le32_to_cpu(rec->r_clusters));
  1751. ret = ocfs2_split_refcount_rec(handle, ci,
  1752. ref_root_bh, ref_leaf_bh,
  1753. &split, index,
  1754. meta_ac, dealloc);
  1755. }
  1756. if (ret) {
  1757. mlog_errno(ret);
  1758. goto out;
  1759. }
  1760. /* Remove the leaf refcount block if it contains no refcount record. */
  1761. if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
  1762. ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh,
  1763. ref_leaf_bh, meta_ac,
  1764. dealloc);
  1765. if (ret)
  1766. mlog_errno(ret);
  1767. }
  1768. out:
  1769. return ret;
  1770. }
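/*
 * Walk [cpos, cpos + len) and drop the refcount by one, record by
 * record. With "delete" set, clusters whose refcount drops from 1 to 0
 * are also queued on the dealloc context for freeing.
 */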
  1771. static int __ocfs2_decrease_refcount(handle_t *handle,
  1772. struct ocfs2_caching_info *ci,
  1773. struct buffer_head *ref_root_bh,
  1774. u64 cpos, u32 len,
  1775. struct ocfs2_alloc_context *meta_ac,
  1776. struct ocfs2_cached_dealloc_ctxt *dealloc,
  1777. int delete)
  1778. {
  1779. int ret = 0, index = 0;
  1780. struct ocfs2_refcount_rec rec;
  1781. unsigned int r_count = 0, r_len;
  1782. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  1783. struct buffer_head *ref_leaf_bh = NULL;
  1784. mlog(0, "Tree owner %llu, decrease refcount start %llu, "
  1785. "len %u, delete %u\n",
  1786. (unsigned long long)ocfs2_metadata_cache_owner(ci),
  1787. (unsigned long long)cpos, len, delete);
  1788. while (len) {
  1789. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1790. cpos, len, &rec, &index,
  1791. &ref_leaf_bh);
  1792. if (ret) {
  1793. mlog_errno(ret);
  1794. goto out;
  1795. }
  1796. r_count = le32_to_cpu(rec.r_refcount);
  1797. BUG_ON(r_count == 0);
  1798. if (!delete)
  1799. BUG_ON(r_count > 1);
  1800. r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
  1801. le32_to_cpu(rec.r_clusters)) - cpos;
  1802. ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh,
  1803. ref_leaf_bh, index,
  1804. cpos, r_len,
  1805. meta_ac, dealloc);
  1806. if (ret) {
  1807. mlog_errno(ret);
  1808. goto out;
  1809. }
  1810. if (le32_to_cpu(rec.r_refcount) == 1 && delete) {
  1811. ret = ocfs2_cache_cluster_dealloc(dealloc,
  1812. ocfs2_clusters_to_blocks(sb, cpos),
  1813. r_len);
  1814. if (ret) {
  1815. mlog_errno(ret);
  1816. goto out;
  1817. }
  1818. }
  1819. cpos += r_len;
  1820. len -= r_len;
  1821. brelse(ref_leaf_bh);
  1822. ref_leaf_bh = NULL;
  1823. }
  1824. out:
  1825. brelse(ref_leaf_bh);
  1826. return ret;
  1827. }
  1828. /* Caller must hold refcount tree lock. */
  1829. int ocfs2_decrease_refcount(struct inode *inode,
  1830. handle_t *handle, u32 cpos, u32 len,
  1831. struct ocfs2_alloc_context *meta_ac,
  1832. struct ocfs2_cached_dealloc_ctxt *dealloc,
  1833. int delete)
  1834. {
  1835. int ret;
  1836. u64 ref_blkno;
  1837. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  1838. struct buffer_head *ref_root_bh = NULL;
  1839. struct ocfs2_refcount_tree *tree;
  1840. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  1841. ret = ocfs2_get_refcount_block(inode, &ref_blkno);
  1842. if (ret) {
  1843. mlog_errno(ret);
  1844. goto out;
  1845. }
  1846. ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree);
  1847. if (ret) {
  1848. mlog_errno(ret);
  1849. goto out;
  1850. }
  1851. ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
  1852. &ref_root_bh);
  1853. if (ret) {
  1854. mlog_errno(ret);
  1855. goto out;
  1856. }
  1857. ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh,
  1858. cpos, len, meta_ac, dealloc, delete);
  1859. if (ret)
  1860. mlog_errno(ret);
  1861. out:
  1862. brelse(ref_root_bh);
  1863. return ret;
  1864. }
  1865. /*
  1866. * Mark the already-existing extent at cpos as refcounted for len clusters.
  1867. * This adds the refcount extent flag.
  1868. *
  1869. * If the existing extent is larger than the request, initiate a
  1870. * split. An attempt will be made at merging with adjacent extents.
  1871. *
  1872. * The caller is responsible for passing down meta_ac if we'll need it.
  1873. */
  1874. static int ocfs2_mark_extent_refcounted(struct inode *inode,
  1875. struct ocfs2_extent_tree *et,
  1876. handle_t *handle, u32 cpos,
  1877. u32 len, u32 phys,
  1878. struct ocfs2_alloc_context *meta_ac,
  1879. struct ocfs2_cached_dealloc_ctxt *dealloc)
  1880. {
  1881. int ret;
  1882. mlog(0, "Inode %lu refcount tree cpos %u, len %u, phys cluster %u\n",
  1883. inode->i_ino, cpos, len, phys);
  1884. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
  1885. ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
  1886. "tree, but the feature bit is not set in the "
  1887. "super block.", inode->i_ino);
  1888. ret = -EROFS;
  1889. goto out;
  1890. }
  1891. ret = ocfs2_change_extent_flag(handle, et, cpos,
  1892. len, phys, meta_ac, dealloc,
  1893. OCFS2_EXT_REFCOUNTED, 0);
  1894. if (ret)
  1895. mlog_errno(ret);
  1896. out:
  1897. return ret;
  1898. }
  1899. /*
  1900. * Given some contiguous physical clusters, calculate what we need
  1901. * for modifying their refcount.
  1902. */
  1903. static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
  1904. struct ocfs2_caching_info *ci,
  1905. struct buffer_head *ref_root_bh,
  1906. u64 start_cpos,
  1907. u32 clusters,
  1908. int *meta_add,
  1909. int *credits)
  1910. {
  1911. int ret = 0, index, ref_blocks = 0, recs_add = 0;
  1912. u64 cpos = start_cpos;
  1913. struct ocfs2_refcount_block *rb;
  1914. struct ocfs2_refcount_rec rec;
  1915. struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
  1916. u32 len;
  1917. mlog(0, "start_cpos %llu, clusters %u\n",
  1918. (unsigned long long)start_cpos, clusters);
  1919. while (clusters) {
  1920. ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
  1921. cpos, clusters, &rec,
  1922. &index, &ref_leaf_bh);
  1923. if (ret) {
  1924. mlog_errno(ret);
  1925. goto out;
  1926. }
  1927. if (ref_leaf_bh != prev_bh) {
  1928. /*
  1929. * Now we encounter a new leaf block, so calculate
  1930. * whether we need to extend the old leaf.
  1931. */
  1932. if (prev_bh) {
  1933. rb = (struct ocfs2_refcount_block *)
  1934. prev_bh->b_data;
  1935. if (le16_to_cpu(rb->rf_records.rl_used) +
  1936. recs_add >
  1937. le16_to_cpu(rb->rf_records.rl_count))
  1938. ref_blocks++;
  1939. }
  1940. recs_add = 0;
  1941. *credits += 1;
  1942. brelse(prev_bh);
  1943. prev_bh = ref_leaf_bh;
  1944. get_bh(prev_bh);
  1945. }
  1946. rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
  1947. mlog(0, "recs_add %d, cpos %llu, clusters %u, rec->r_cpos %llu, "
  1948. "rec->r_clusters %u, rec->r_refcount %u, index %d\n",
  1949. recs_add, (unsigned long long)cpos, clusters,
  1950. (unsigned long long)le64_to_cpu(rec.r_cpos),
  1951. le32_to_cpu(rec.r_clusters),
  1952. le32_to_cpu(rec.r_refcount), index);
  1953. len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
  1954. le32_to_cpu(rec.r_clusters)) - cpos;
  1955. /*
  1956. * If the refcount rec already exists, cool. We just need
  1957. * to check whether there is a split. Otherwise we just need
  1958. * to increase the refcount.
  1959. * If we will insert one, increase recs_add.
  1960. *
  1961. * We record all the records which will be inserted to the
  1962. * same refcount block, so that we can tell exactly whether
  1963. * we need a new refcount block or not.
  1964. */
  1965. if (rec.r_refcount) {
  1966. /* Check whether we need a split at the beginning. */
  1967. if (cpos == start_cpos &&
  1968. cpos != le64_to_cpu(rec.r_cpos))
  1969. recs_add++;
  1970. /* Check whether we need a split in the end. */
  1971. if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
  1972. le32_to_cpu(rec.r_clusters))
  1973. recs_add++;
  1974. } else
  1975. recs_add++;
  1976. brelse(ref_leaf_bh);
  1977. ref_leaf_bh = NULL;
  1978. clusters -= len;
  1979. cpos += len;
  1980. }
  1981. if (prev_bh) {
  1982. rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
  1983. if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
  1984. le16_to_cpu(rb->rf_records.rl_count))
  1985. ref_blocks++;
  1986. *credits += 1;
  1987. }
  1988. if (!ref_blocks)
  1989. goto out;
  1990. mlog(0, "we need ref_blocks %d\n", ref_blocks);
  1991. *meta_add += ref_blocks;
  1992. *credits += ref_blocks;
  1993. /*
  1994. * So we may need ref_blocks to insert into the tree.
  1995. * That also means we need to change the b-tree and add that number
  1996. * of records since we never merge them.
  1997. * We need one more block for expansion since the newly created leaf
  1998. * block may also be full and need a split.
  1999. */
  2000. rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
  2001. if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
  2002. struct ocfs2_extent_tree et;
  2003. ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
  2004. *meta_add += ocfs2_extend_meta_needed(et.et_root_el);
  2005. *credits += ocfs2_calc_extend_credits(sb,
  2006. et.et_root_el,
  2007. ref_blocks);
  2008. } else {
  2009. *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
  2010. *meta_add += 1;
  2011. }
  2012. out:
  2013. brelse(ref_leaf_bh);
  2014. brelse(prev_bh);
  2015. return ret;
  2016. }
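/*
 * An illustrative run of the above (hypothetical numbers): touching 10
 * clusters whose records all live in one leaf costs 1 credit for the
 * leaf; if a head and a tail split would push rl_used past rl_count,
 * ref_blocks becomes 1, adding one metadata block, one credit for the
 * new block, and the credits needed to grow the refcount b-tree.
 */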
  2017. /*
  2018. * For the refcount tree, we will decrease the refcount of some
  2019. * contiguous clusters, so just go through the tree to see how many
  2020. * blocks we are going to touch and whether we need to create new blocks.
  2021. *
  2022. * Normally the refcount blocks that store these refcounts should be
  2023. * contiguous as well, so we can get the number easily.
  2024. * As for meta_ac, we will at most split 2 refcount records and add
  2025. * 2 more refcount blocks, so just check it in a rough way.
  2026. *
  2027. * Caller must hold refcount tree lock.
  2028. */
  2029. int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
  2030. struct buffer_head *di_bh,
  2031. u64 phys_blkno,
  2032. u32 clusters,
  2033. int *credits,
  2034. struct ocfs2_alloc_context **meta_ac)
  2035. {
  2036. int ret, ref_blocks = 0;
  2037. struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
  2038. struct ocfs2_inode_info *oi = OCFS2_I(inode);
  2039. struct buffer_head *ref_root_bh = NULL;
  2040. struct ocfs2_refcount_tree *tree;
  2041. u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);
  2042. if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
  2043. ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
  2044. "tree, but the feature bit is not set in the "
  2045. "super block.", inode->i_ino);
  2046. ret = -EROFS;
  2047. goto out;
  2048. }
  2049. BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
  2050. ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
  2051. le64_to_cpu(di->i_refcount_loc), &tree);
  2052. if (ret) {
  2053. mlog_errno(ret);
  2054. goto out;
  2055. }
  2056. ret = ocfs2_read_refcount_block(&tree->rf_ci,
  2057. le64_to_cpu(di->i_refcount_loc),
  2058. &ref_root_bh);
  2059. if (ret) {
  2060. mlog_errno(ret);
  2061. goto out;
  2062. }
  2063. ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
  2064. &tree->rf_ci,
  2065. ref_root_bh,
  2066. start_cpos, clusters,
  2067. &ref_blocks, credits);
  2068. if (ret) {
  2069. mlog_errno(ret);
  2070. goto out;
  2071. }
  2072. mlog(0, "reserve new metadata %d, credits = %d\n",
  2073. ref_blocks, *credits);
  2074. if (ref_blocks) {
  2075. ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
  2076. ref_blocks, meta_ac);
  2077. if (ret)
  2078. mlog_errno(ret);
  2079. }
  2080. out:
  2081. brelse(ref_root_bh);
  2082. return ret;
  2083. }
  2084. #define MAX_CONTIG_BYTES 1048576
  2085. static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb)
  2086. {
  2087. return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES);
  2088. }
  2089. static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb)
  2090. {
  2091. return ~(ocfs2_cow_contig_clusters(sb) - 1);
  2092. }
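/*
 * A quick example, assuming a hypothetical 4KB cluster size:
 * MAX_CONTIG_BYTES of 1MB gives ocfs2_cow_contig_clusters() == 256, and
 * the mask is then ~255 == 0xffffff00, i.e. offsets get rounded to
 * 256-cluster boundaries.
 */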
  2093. /*
  2094. * Given an extent that starts at 'start' and an I/O that starts at 'cpos',
  2095. * find an offset (start + (n * contig_clusters)) that is closest to cpos
  2096. * while still being less than or equal to it.
  2097. *
  2098. * The goal is to break the extent at a multiple of contig_clusters.
  2099. */
  2100. static inline unsigned int ocfs2_cow_align_start(struct super_block *sb,
  2101. unsigned int start,
  2102. unsigned int cpos)
  2103. {
  2104. BUG_ON(start > cpos);
  2105. return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
  2106. }
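/*
 * Example, with the hypothetical 256-cluster contig size from above:
 * start = 100, cpos = 500 gives 100 + ((500 - 100) & ~255) == 356, the
 * largest start + n * 256 that is still <= cpos.
 */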
  2107. /*
  2108. * Given a cluster count of len, pad it out so that it is a multiple
  2109. * of contig_clusters.
  2110. */
  2111. static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
  2112. unsigned int len)
  2113. {
  2114. unsigned int padded =
  2115. (len + (ocfs2_cow_contig_clusters(sb) - 1)) &
  2116. ocfs2_cow_contig_mask(sb);
  2117. /* Did we wrap? */
  2118. if (padded < len)
  2119. padded = UINT_MAX;
  2120. return padded;
  2121. }
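/*
 * Example, again with 256 contig clusters: len = 300 pads out to
 * (300 + 255) & ~255 == 512; a len that would wrap past UINT_MAX is
 * clamped to UINT_MAX instead.
 */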
  2122. /*
  2123. * Calculate the start and the number of virtual clusters we need to CoW.
  2124. *
  2125. * cpos is the virtual start cluster position we want to CoW in a
  2126. * file and write_len is the cluster length.
  2127. * max_cpos is the place where we want to stop CoW intentionally.
  2128. *
  2129. * Normally we will start CoW from the beginning of the extent record
  2130. * containing cpos. We try to break up extents on boundaries of
  2131. * MAX_CONTIG_BYTES so that we get good I/O from the resulting extent tree.
  2132. */
  2133. static int ocfs2_refcount_cal_cow_clusters(struct inode *inode,
  2134. struct ocfs2_extent_list *el,
  2135. u32 cpos,
  2136. u32 write_len,
  2137. u32 max_cpos,
  2138. u32 *cow_start,
  2139. u32 *cow_len)
  2140. {
  2141. int ret = 0;
  2142. int tree_height = le16_to_cpu(el->l_tree_depth), i;
  2143. struct buffer_head *eb_bh = NULL;
  2144. struct ocfs2_extent_block *eb = NULL;
  2145. struct ocfs2_extent_rec *rec;
  2146. unsigned int want_clusters, rec_end = 0;
  2147. int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb);
  2148. int leaf_clusters;
  2149. BUG_ON(cpos + write_len > max_cpos);
  2150. if (tree_height > 0) {
  2151. ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
  2152. if (ret) {
  2153. mlog_errno(ret);
  2154. goto out;
  2155. }
  2156. eb = (struct ocfs2_extent_block *) eb_bh->b_data;
  2157. el = &eb->h_list;
  2158. if (el->l_tree_depth) {
  2159. ocfs2_error(inode->i_sb,
  2160. "Inode %lu has non zero tree depth in "
  2161. "leaf block %llu\n", inode->i_ino,
  2162. (unsigned long long)eb_bh->b_blocknr);
  2163. ret = -EROFS;
  2164. goto out;
  2165. }
  2166. }
  2167. *cow_len = 0;
  2168. for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
  2169. rec = &el->l_recs[i];
  2170. if (ocfs2_is_empty_extent(rec)) {
  2171. mlog_bug_on_msg(i != 0, "Inode %lu has empty record in "
  2172. "index %d\n", inode->i_ino, i);
  2173. continue;
  2174. }
  2175. if (le32_to_cpu(rec->e_cpos) +
  2176. le16_to_cpu(rec->e_leaf_clusters) <= cpos)
  2177. continue;
  2178. if (*cow_len == 0) {
  2179. /*
  2180. * We should find a refcounted record in the
  2181. * first pass.
  2182. */
  2183. BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED));
  2184. *cow_start = le32_to_cpu(rec->e_cpos);
  2185. }
  2186. /*
  2187. * If we encounter a hole, a non-refcounted record or
  2188. * pass the max_cpos, stop the search.
  2189. */
  2190. if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) ||
  2191. (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) ||
  2192. (max_cpos <= le32_to_cpu(rec->e_cpos)))
  2193. break;
  2194. leaf_clusters = le16_to_cpu(rec->e_leaf_clusters);
  2195. rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters;
  2196. if (rec_end > max_cpos) {
  2197. rec_end = max_cpos;
  2198. leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos);
  2199. }
  2200. /*
  2201. * How many clusters do we actually need from
  2202. * this extent? First we see how many we actually
  2203. * need to complete the write. If that's smaller
  2204. * than contig_clusters, we try for contig_clusters.
  2205. */
  2206. if (!*cow_len)
  2207. want_clusters = write_len;
  2208. else
  2209. want_clusters = (cpos + write_len) -
  2210. (*cow_start + *cow_len);
  2211. if (want_clusters < contig_clusters)
  2212. want_clusters = contig_clusters;
  2213. /*
  2214. * If the write does not cover the whole extent, we
  2215. * need to calculate how we're going to split the extent.
  2216. * We try to do it on contig_clusters boundaries.
  2217. *
  2218. * Any extent smaller than contig_clusters will be
  2219. * CoWed in its entirety.
  2220. */
  2221. if (leaf_clusters <= contig_clusters)
  2222. *cow_len += leaf_clusters;
  2223. else if (*cow_len || (*cow_start == cpos)) {
  2224. /*
  2225. * This extent needs to be CoW'd from its
  2226. * beginning, so all we have to do is compute
  2227. * how many clusters to grab. We align
  2228. * want_clusters to the edge of contig_clusters
  2229. * to get better I/O.
  2230. */
  2231. want_clusters = ocfs2_cow_align_length(inode->i_sb,
  2232. want_clusters);
  2233. if (leaf_clusters < want_clusters)
  2234. *cow_len += leaf_clusters;
  2235. else
  2236. *cow_len += want_clusters;
  2237. } else if ((*cow_start + contig_clusters) >=
  2238. (cpos + write_len)) {
  2239. /*
  2240. * Breaking off contig_clusters at the front
  2241. * of the extent will cover our write. That's
  2242. * easy.
  2243. */
  2244. *cow_len = contig_clusters;
  2245. } else if ((rec_end - cpos) <= contig_clusters) {
  2246. /*
  2247. * Breaking off contig_clusters at the tail of
  2248. * this extent will cover cpos.
  2249. */
  2250. *cow_start = rec_end - contig_clusters;
  2251. *cow_len = contig_clusters;
  2252. } else if ((rec_end - cpos) <= want_clusters) {
  2253. /*
  2254. * While we can't fit the entire write in this
  2255. * extent, we know that the write goes from cpos
  2256. * to the end of the extent. Break that off.
  2257. * We try to break it at some multiple of
  2258. * contig_clusters from the front of the extent.
  2259. * Failing that (ie, cpos is within
  2260. * contig_clusters of the front), we'll CoW the
  2261. * entire extent.
  2262. */
  2263. *cow_start = ocfs2_cow_align_start(inode->i_sb,
  2264. *cow_start, cpos);
  2265. *cow_len = rec_end - *cow_start;
  2266. } else {
  2267. /*
  2268. * Ok, the entire write lives in the middle of
  2269. * this extent. Let's try to slice the extent up
  2270. * nicely. Optimally, our CoW region starts at
  2271. * m*contig_clusters from the beginning of the
  2272. * extent and goes for n*contig_clusters,
  2273. * covering the entire write.
  2274. */
  2275. *cow_start = ocfs2_cow_align_start(inode->i_sb,
  2276. *cow_start, cpos);
  2277. want_clusters = (cpos + write_len) - *cow_start;
  2278. want_clusters = ocfs2_cow_align_length(inode->i_sb,
  2279. want_clusters);
  2280. if (*cow_start + want_clusters <= rec_end)
  2281. *cow_len = want_clusters;
  2282. else
  2283. *cow_len = rec_end - *cow_start;
  2284. }
  2285. /* Have we covered our entire write yet? */
  2286. if ((*cow_start + *cow_len) >= (cpos + write_len))
  2287. break;
  2288. /*
  2289. * If we reach the end of the extent block and don't get enough
  2290. * clusters, continue with the next extent block if possible.
  2291. */
  2292. if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
  2293. eb && eb->h_next_leaf_blk) {
  2294. brelse(eb_bh);
  2295. eb_bh = NULL;
  2296. ret = ocfs2_read_extent_block(INODE_CACHE(inode),
  2297. le64_to_cpu(eb->h_next_leaf_blk),
  2298. &eb_bh);
  2299. if (ret) {
  2300. mlog_errno(ret);
  2301. goto out;
  2302. }
  2303. eb = (struct ocfs2_extent_block *) eb_bh->b_data;
  2304. el = &eb->h_list;
  2305. i = -1;
  2306. }
  2307. }
  2308. out:
  2309. brelse(eb_bh);
  2310. return ret;
  2311. }
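/*
 * An illustrative case for the search above (hypothetical numbers,
 * 256-cluster contig size): a 4-cluster write at cpos 1000 that falls
 * in the middle of a refcounted extent [0, 2048) is widened to the
 * aligned window [768, 1024), so the CoW both covers the write and
 * breaks the extent on contig_clusters boundaries.
 */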
  2312. /*
  2313. * Prepare meta_ac, data_ac and calculate credits when we want to add some
  2314. * num_clusters in data_tree "et" and change the refcount for the old
  2315. * clusters (starting from p_cluster) in the refcount tree.
  2316. *
  2317. * Note:
  2318. * 1. Since we may split the old tree, we will need at most num_clusters + 2
  2319. * new leaf records.
  2320. * 2. In some cases we may not need to reserve new clusters (e.g. reflink),
  2321. * so just pass data_ac = NULL.
  2322. */
  2323. static int ocfs2_lock_refcount_allocators(struct super_block *sb,
  2324. u32 p_cluster, u32 num_clusters,
  2325. struct ocfs2_extent_tree *et,
  2326. struct ocfs2_caching_info *ref_ci,
  2327. struct buffer_head *ref_root_bh,
  2328. struct ocfs2_alloc_context **meta_ac,
  2329. struct ocfs2_alloc_context **data_ac,
  2330. int *credits)
  2331. {
  2332. int ret = 0, meta_add = 0;
  2333. int num_free_extents = ocfs2_num_free_extents(OCFS2_SB(sb), et);
  2334. if (num_free_extents < 0) {
  2335. ret = num_free_extents;
  2336. mlog_errno(ret);
  2337. goto out;
  2338. }
  2339. if (num_free_extents < num_clusters + 2)
  2340. meta_add =
  2341. ocfs2_extend_meta_needed(et->et_root_el);
  2342. *credits += ocfs2_calc_extend_credits(sb, et->et_root_el,
  2343. num_clusters + 2);
  2344. ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
  2345. p_cluster, num_clusters,
  2346. &meta_add, credits);
  2347. if (ret) {
  2348. mlog_errno(ret);
  2349. goto out;
  2350. }
  2351. mlog(0, "reserve new metadata %d, clusters %u, credits = %d\n",
  2352. meta_add, num_clusters, *credits);
  2353. ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
  2354. meta_ac);
  2355. if (ret) {
  2356. mlog_errno(ret);
  2357. goto out;
  2358. }
  2359. if (data_ac) {
  2360. ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
  2361. data_ac);
  2362. if (ret)
  2363. mlog_errno(ret);
  2364. }
  2365. out:
  2366. if (ret) {
  2367. if (*meta_ac) {
  2368. ocfs2_free_alloc_context(*meta_ac);
  2369. *meta_ac = NULL;
  2370. }
  2371. }
  2372. return ret;
  2373. }
  2374. static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
  2375. {
  2376. BUG_ON(buffer_dirty(bh));
  2377. clear_buffer_mapped(bh);
  2378. return 0;
  2379. }
  2380. static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
  2381. struct ocfs2_cow_context *context,
  2382. u32 cpos, u32 old_cluster,
  2383. u32 new_cluster, u32 new_len)
  2384. {
  2385. int ret = 0, partial;
  2386. struct ocfs2_caching_info *ci = context->data_et.et_ci;
  2387. struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
  2388. u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
  2389. struct page *page;
  2390. pgoff_t page_index;
  2391. unsigned int from, to;
  2392. loff_t offset, end, map_end;
  2393. struct address_space *mapping = context->inode->i_mapping;
  2394. mlog(0, "old_cluster %u, new %u, len %u at offset %u\n", old_cluster,
  2395. new_cluster, new_len, cpos);
  2396. offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
  2397. end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
  2398. while (offset < end) {
  2399. page_index = offset >> PAGE_CACHE_SHIFT;
  2400. map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
  2401. if (map_end > end)
  2402. map_end = end;
  2403. /* from and to are the offsets within the page. */
  2404. from = offset & (PAGE_CACHE_SIZE - 1);
  2405. to = PAGE_CACHE_SIZE;
  2406. if (map_end & (PAGE_CACHE_SIZE - 1))
  2407. to = map_end & (PAGE_CACHE_SIZE - 1);
  2408. page = grab_cache_page(mapping, page_index);
  2409. /* This page can't be dirtied before we CoW it out. */
  2410. BUG_ON(PageDirty(page));
  2411. if (!PageUptodate(page)) {
  2412. ret = block_read_full_page(page, ocfs2_get_block);
  2413. if (ret) {
  2414. mlog_errno(ret);
  2415. goto unlock;
  2416. }
  2417. lock_page(page);
  2418. }
  2419. if (page_has_buffers(page)) {
  2420. ret = walk_page_buffers(handle, page_buffers(page),
  2421. from, to, &partial,
  2422. ocfs2_clear_cow_buffer);
  2423. if (ret) {
  2424. mlog_errno(ret);
  2425. goto unlock;
  2426. }
  2427. }
  2428. ocfs2_map_and_dirty_page(context->inode,
  2429. handle, from, to,
  2430. page, 0, &new_block);
  2431. mark_page_accessed(page);
  2432. unlock:
  2433. unlock_page(page);
  2434. page_cache_release(page);
  2435. page = NULL;
  2436. offset = map_end;
  2437. if (ret)
  2438. break;
  2439. }
  2440. return ret;
  2441. }
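/*
 * The jbd variant below is the metadata counterpart of
 * ocfs2_duplicate_clusters_by_page(): instead of copying through the
 * page cache, it copies the old blocks into the newly allocated ones
 * under journal control, block by block.
 */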
  2442. static int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
  2443. struct ocfs2_cow_context *context,
  2444. u32 cpos, u32 old_cluster,
  2445. u32 new_cluster, u32 new_len)
  2446. {
  2447. int ret = 0;
  2448. struct super_block *sb = context->inode->i_sb;
  2449. struct ocfs2_caching_info *ci = context->data_et.et_ci;
  2450. int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
  2451. u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster);
  2452. u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
  2453. struct ocfs2_super *osb = OCFS2_SB(sb);
  2454. struct buffer_head *old_bh = NULL;
  2455. struct buffer_head *new_bh = NULL;
  2456. mlog(0, "old_cluster %u, new %u, len %u\n", old_cluster,
  2457. new_cluster, new_len);
  2458. for (i = 0; i < blocks; i++, old_block++, new_block++) {
  2459. new_bh = sb_getblk(osb->sb, new_block);
  2460. if (new_bh == NULL) {
  2461. ret = -EIO;
  2462. mlog_errno(ret);
  2463. break;
  2464. }
  2465. ocfs2_set_new_buffer_uptodate(ci, new_bh);
  2466. ret = ocfs2_read_block(ci, old_block, &old_bh, NULL);
  2467. if (ret) {
  2468. mlog_errno(ret);
  2469. break;
  2470. }
  2471. ret = ocfs2_journal_access(handle, ci, new_bh,
  2472. OCFS2_JOURNAL_ACCESS_CREATE);
  2473. if (ret) {
  2474. mlog_errno(ret);
  2475. break;
  2476. }
  2477. memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize);
  2478. ret = ocfs2_journal_dirty(handle, new_bh);
  2479. if (ret) {
  2480. mlog_errno(ret);
  2481. break;
  2482. }
  2483. brelse(new_bh);
  2484. brelse(old_bh);
  2485. new_bh = NULL;
  2486. old_bh = NULL;
  2487. }
  2488. brelse(new_bh);
  2489. brelse(old_bh);
  2490. return ret;
  2491. }
  2492. static int ocfs2_clear_ext_refcount(handle_t *handle,
  2493. struct ocfs2_extent_tree *et,
  2494. u32 cpos, u32 p_cluster, u32 len,
  2495. unsigned int ext_flags,
  2496. struct ocfs2_alloc_context *meta_ac,
  2497. struct ocfs2_cached_dealloc_ctxt *dealloc)
  2498. {
  2499. int ret, index;
  2500. struct ocfs2_extent_rec replace_rec;
  2501. struct ocfs2_path *path = NULL;
  2502. struct ocfs2_extent_list *el;
  2503. struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
  2504. u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
  2505. mlog(0, "inode %llu cpos %u, len %u, p_cluster %u, ext_flags %u\n",
  2506. (unsigned long long)ino, cpos, len, p_cluster, ext_flags);
  2507. memset(&replace_rec, 0, sizeof(replace_rec));
  2508. replace_rec.e_cpos = cpu_to_le32(cpos);
  2509. replace_rec.e_leaf_clusters = cpu_to_le16(len);
  2510. replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb,
  2511. p_cluster));
  2512. replace_rec.e_flags = ext_flags;
  2513. replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED;
  2514. path = ocfs2_new_path_from_et(et);
  2515. if (!path) {
  2516. ret = -ENOMEM;
  2517. mlog_errno(ret);
  2518. goto out;
  2519. }
  2520. ret = ocfs2_find_path(et->et_ci, path, cpos);
  2521. if (ret) {
  2522. mlog_errno(ret);
  2523. goto out;
  2524. }
  2525. el = path_leaf_el(path);
  2526. index = ocfs2_search_extent_list(el, cpos);
  2527. if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
  2528. ocfs2_error(sb,
  2529. "Inode %llu has an extent at cpos %u which can no "
  2530. "longer be found.\n",
  2531. (unsigned long long)ino, cpos);
  2532. ret = -EROFS;
  2533. goto out;
  2534. }
  2535. ret = ocfs2_split_extent(handle, et, path, index,
  2536. &replace_rec, meta_ac, dealloc);
  2537. if (ret)
  2538. mlog_errno(ret);
  2539. out:
  2540. ocfs2_free_path(path);
  2541. return ret;
  2542. }
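/*
 * Point [cpos, cpos + len) at the new physical clusters: duplicate the
 * old data first (unless the extent is unwritten and has nothing worth
 * copying), then rewrite the extent record and clear its REFCOUNTED
 * flag.
 */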
  2543. static int ocfs2_replace_clusters(handle_t *handle,
  2544. struct ocfs2_cow_context *context,
  2545. u32 cpos, u32 old,
  2546. u32 new, u32 len,
  2547. unsigned int ext_flags)
  2548. {
  2549. int ret;
  2550. struct ocfs2_caching_info *ci = context->data_et.et_ci;
  2551. u64 ino = ocfs2_metadata_cache_owner(ci);
  2552. mlog(0, "inode %llu, cpos %u, old %u, new %u, len %u, ext_flags %u\n",
  2553. (unsigned long long)ino, cpos, old, new, len, ext_flags);
  2554. /* If the old clusters are unwritten, no need to duplicate. */
  2555. if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
  2556. ret = context->cow_duplicate_clusters(handle, context, cpos,
  2557. old, new, len);
  2558. if (ret) {
  2559. mlog_errno(ret);
  2560. goto out;
  2561. }
  2562. }
  2563. ret = ocfs2_clear_ext_refcount(handle, &context->data_et,
  2564. cpos, new, len, ext_flags,
  2565. context->meta_ac, &context->dealloc);
  2566. if (ret)
  2567. mlog_errno(ret);
  2568. out:
  2569. return ret;
  2570. }
  2571. static int ocfs2_cow_sync_writeback(struct super_block *sb,
  2572. struct ocfs2_cow_context *context,
  2573. u32 cpos, u32 num_clusters)
  2574. {
  2575. int ret = 0;
  2576. loff_t offset, end, map_end;
  2577. pgoff_t page_index;
  2578. struct page *page;
  2579. if (ocfs2_should_order_data(context->inode))
  2580. return 0;
  2581. offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
  2582. end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);
  2583. ret = filemap_fdatawrite_range(context->inode->i_mapping,
  2584. offset, end - 1);
  2585. if (ret < 0) {
  2586. mlog_errno(ret);
  2587. return ret;
  2588. }
  2589. while (offset < end) {
  2590. page_index = offset >> PAGE_CACHE_SHIFT;
  2591. map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
  2592. if (map_end > end)
  2593. map_end = end;
  2594. page = grab_cache_page(context->inode->i_mapping, page_index);
  2595. BUG_ON(!page);
  2596. wait_on_page_writeback(page);
  2597. if (PageError(page)) {
  2598. ret = -EIO;
  2599. mlog_errno(ret);
  2600. } else
  2601. mark_page_accessed(page);
  2602. unlock_page(page);
  2603. page_cache_release(page);
  2604. page = NULL;
  2605. offset = map_end;
  2606. if (ret)
  2607. break;
  2608. }
  2609. return ret;
  2610. }
  2611. static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context,
  2612. u32 v_cluster, u32 *p_cluster,
  2613. u32 *num_clusters,
  2614. unsigned int *extent_flags)
  2615. {
  2616. return ocfs2_get_clusters(context->inode, v_cluster, p_cluster,
  2617. num_clusters, extent_flags);
  2618. }
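/*
 * CoW one contiguous chunk: reserve allocators and credits, then for
 * each refcount record either clear the REFCOUNTED flag in place (the
 * refcount is 1 and nobody else shares the clusters) or claim new
 * clusters, copy the data over and drop the old refcount.
 */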
  2619. static int ocfs2_make_clusters_writable(struct super_block *sb,
  2620. struct ocfs2_cow_context *context,
  2621. u32 cpos, u32 p_cluster,
  2622. u32 num_clusters, unsigned int e_flags)
  2623. {
  2624. int ret, delete, index, credits = 0;
  2625. u32 new_bit, new_len, orig_cpos = cpos, orig_num_clusters = num_clusters;
  2626. unsigned int set_len;
  2627. struct ocfs2_super *osb = OCFS2_SB(sb);
  2628. handle_t *handle;
  2629. struct buffer_head *ref_leaf_bh = NULL;
  2630. struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
  2631. struct ocfs2_refcount_rec rec;
  2632. mlog(0, "cpos %u, p_cluster %u, num_clusters %u, e_flags %u\n",
  2633. cpos, p_cluster, num_clusters, e_flags);
  2634. ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
  2635. &context->data_et,
  2636. ref_ci,
  2637. context->ref_root_bh,
  2638. &context->meta_ac,
  2639. &context->data_ac, &credits);
  2640. if (ret) {
  2641. mlog_errno(ret);
  2642. return ret;
  2643. }
  2644. if (context->post_refcount)
  2645. credits += context->post_refcount->credits;
  2646. credits += context->extra_credits;
  2647. handle = ocfs2_start_trans(osb, credits);
  2648. if (IS_ERR(handle)) {
  2649. ret = PTR_ERR(handle);
  2650. mlog_errno(ret);
  2651. goto out;
  2652. }
  2653. while (num_clusters) {
  2654. ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
  2655. p_cluster, num_clusters,
  2656. &rec, &index, &ref_leaf_bh);
  2657. if (ret) {
  2658. mlog_errno(ret);
  2659. goto out_commit;
  2660. }
  2661. BUG_ON(!rec.r_refcount);
  2662. set_len = min((u64)p_cluster + num_clusters,
  2663. le64_to_cpu(rec.r_cpos) +
  2664. le32_to_cpu(rec.r_clusters)) - p_cluster;
  2665. /*
  2666. * There are many different situations here.
  2667. * 1. If refcount == 1, remove the flag and don't COW.
  2668. * 2. If refcount > 1, allocate clusters.
  2669. * Here we may not be able to allocate set_len at once, so continue
  2670. * until we reach num_clusters.
  2671. */
  2672. if (le32_to_cpu(rec.r_refcount) == 1) {
  2673. delete = 0;
  2674. ret = ocfs2_clear_ext_refcount(handle,
  2675. &context->data_et,
  2676. cpos, p_cluster,
  2677. set_len, e_flags,
  2678. context->meta_ac,
  2679. &context->dealloc);
  2680. if (ret) {
  2681. mlog_errno(ret);
  2682. goto out_commit;
  2683. }
  2684. } else {
  2685. delete = 1;
  2686. ret = __ocfs2_claim_clusters(osb, handle,
  2687. context->data_ac,
  2688. 1, set_len,
  2689. &new_bit, &new_len);
  2690. if (ret) {
  2691. mlog_errno(ret);
  2692. goto out_commit;
  2693. }
  2694. ret = ocfs2_replace_clusters(handle, context,
  2695. cpos, p_cluster, new_bit,
  2696. new_len, e_flags);
  2697. if (ret) {
  2698. mlog_errno(ret);
  2699. goto out_commit;
  2700. }
  2701. set_len = new_len;
  2702. }
  2703. ret = __ocfs2_decrease_refcount(handle, ref_ci,
  2704. context->ref_root_bh,
  2705. p_cluster, set_len,
  2706. context->meta_ac,
  2707. &context->dealloc, delete);
  2708. if (ret) {
  2709. mlog_errno(ret);
  2710. goto out_commit;
  2711. }
  2712. cpos += set_len;
  2713. p_cluster += set_len;
  2714. num_clusters -= set_len;
  2715. brelse(ref_leaf_bh);
  2716. ref_leaf_bh = NULL;
  2717. }
  2718. /* handle any post_cow action. */
  2719. if (context->post_refcount && context->post_refcount->func) {
  2720. ret = context->post_refcount->func(context->inode, handle,
  2721. context->post_refcount->para);
  2722. if (ret) {
  2723. mlog_errno(ret);
  2724. goto out_commit;
  2725. }
  2726. }
  2727. /*
  2728. * Here we should write the new pages out first if we are in write-back
  2729. * mode. Use the range saved at entry; the loop consumed cpos/num_clusters.
  2730. */
  2731. if (context->get_clusters == ocfs2_di_get_clusters) {
  2732. ret = ocfs2_cow_sync_writeback(sb, context, orig_cpos, orig_num_clusters);
  2733. if (ret)
  2734. mlog_errno(ret);
  2735. }
  2736. out_commit:
  2737. ocfs2_commit_trans(osb, handle);
  2738. out:
  2739. if (context->data_ac) {
  2740. ocfs2_free_alloc_context(context->data_ac);
  2741. context->data_ac = NULL;
  2742. }
  2743. if (context->meta_ac) {
  2744. ocfs2_free_alloc_context(context->meta_ac);
  2745. context->meta_ac = NULL;
  2746. }
  2747. brelse(ref_leaf_bh);
  2748. return ret;
  2749. }

static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
{
	int ret = 0;
	struct inode *inode = context->inode;
	u32 cow_start = context->cow_start, cow_len = context->cow_len;
	u32 p_cluster, num_clusters;
	unsigned int ext_flags;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
		ocfs2_error(inode->i_sb, "Inode %lu wants to use a refcount "
			    "tree, but the feature bit is not set in the "
			    "super block.", inode->i_ino);
		return -EROFS;
	}
	ocfs2_init_dealloc_ctxt(&context->dealloc);

	while (cow_len) {
		ret = context->get_clusters(context, cow_start, &p_cluster,
					    &num_clusters, &ext_flags);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED));

		if (cow_len < num_clusters)
			num_clusters = cow_len;

		ret = ocfs2_make_clusters_writable(inode->i_sb, context,
						   cow_start, p_cluster,
						   num_clusters, ext_flags);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		cow_len -= num_clusters;
		cow_start += num_clusters;
	}

	if (ocfs2_dealloc_has_cluster(&context->dealloc)) {
		ocfs2_schedule_truncate_log_flush(osb, 1);
		ocfs2_run_deallocs(osb, &context->dealloc);
	}

	return ret;
}
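
/*
 * How the CoW engine is wired up (a reading aid summarizing the callers
 * below, not a stable API): before calling ocfs2_replace_cow(), a caller
 * fills in three hooks on the ocfs2_cow_context:
 *
 *   get_clusters           - extent lookup; ocfs2_di_get_clusters for
 *                            file data, ocfs2_xattr_value_get_clusters
 *                            for xattr values.
 *   cow_duplicate_clusters - the copy itself; by page cache for file
 *                            data, through the journal (jbd) for xattrs.
 *   post_refcount          - optional callback run after the refcount
 *                            change, used by the xattr path.
 */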

/*
 * Starting at cpos, try to CoW write_len clusters.  Don't CoW
 * past max_cpos.  This will stop when it runs into a hole or an
 * unrefcounted extent.
 */
static int ocfs2_refcount_cow_hunk(struct inode *inode,
				   struct buffer_head *di_bh,
				   u32 cpos, u32 write_len, u32 max_cpos)
{
	int ret;
	u32 cow_start = 0, cow_len = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_tree *ref_tree;
	struct ocfs2_cow_context *context = NULL;

	BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));

	ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list,
					      cpos, write_len, max_cpos,
					      &cow_start, &cow_len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	mlog(0, "CoW inode %lu, cpos %u, write_len %u, cow_start %u, "
	     "cow_len %u\n", inode->i_ino,
	     cpos, write_len, cow_start, cow_len);

	BUG_ON(cow_len == 0);

	context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
	if (!context) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
				       1, &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	context->inode = inode;
	context->cow_start = cow_start;
	context->cow_len = cow_len;
	context->ref_tree = ref_tree;
	context->ref_root_bh = ref_root_bh;
	context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
	context->get_clusters = ocfs2_di_get_clusters;

	ocfs2_init_dinode_extent_tree(&context->data_et,
				      INODE_CACHE(inode), di_bh);

	ret = ocfs2_replace_cow(context);
	if (ret)
		mlog_errno(ret);
	/*
	 * Truncate the extent map here: no matter whether we hit an
	 * error during the CoW, the cached extent map can no longer
	 * be trusted.
	 */
	ocfs2_extent_map_trunc(inode, cow_start);

	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);

out:
	kfree(context);
	return ret;
}

/*
 * CoW any and all clusters between cpos and cpos+write_len.
 * Don't CoW past max_cpos.  If this returns successfully, all
 * clusters between cpos and cpos+write_len are safe to modify.
 */
int ocfs2_refcount_cow(struct inode *inode,
		       struct buffer_head *di_bh,
		       u32 cpos, u32 write_len, u32 max_cpos)
{
	int ret = 0;
	u32 p_cluster, num_clusters;
	unsigned int ext_flags;

	while (write_len) {
		ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
					 &num_clusters, &ext_flags);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		if (write_len < num_clusters)
			num_clusters = write_len;

		if (ext_flags & OCFS2_EXT_REFCOUNTED) {
			ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
						      num_clusters, max_cpos);
			if (ret) {
				mlog_errno(ret);
				break;
			}
		}

		write_len -= num_clusters;
		cpos += num_clusters;
	}

	return ret;
}
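
/*
 * Illustrative call site (hypothetical, for exposition only - the names
 * here are not from this file): a write path that already holds the
 * inode and refcount locks could make its range safe to dirty with:
 *
 *	if (oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {
 *		ret = ocfs2_refcount_cow(inode, di_bh, cpos,
 *					 clusters_to_write, UINT_MAX);
 *		if (ret)
 *			mlog_errno(ret);
 *	}
 */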

static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context,
					  u32 v_cluster, u32 *p_cluster,
					  u32 *num_clusters,
					  unsigned int *extent_flags)
{
	struct inode *inode = context->inode;
	struct ocfs2_xattr_value_root *xv = context->cow_object;

	return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster,
					num_clusters, &xv->xr_list,
					extent_flags);
}
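
/*
 * Note: context->cow_object carries the ocfs2_xattr_value_root into this
 * generic get_clusters hook; the file-data path leaves it NULL and walks
 * the dinode's extent list through ocfs2_di_get_clusters instead.
 */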

/*
 * Given an xattr value root, calculate the maximum amount of metadata
 * and journal credits we need for a refcount tree change if we truncate
 * it to 0.
 */
int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
				       struct ocfs2_caching_info *ref_ci,
				       struct buffer_head *ref_root_bh,
				       struct ocfs2_xattr_value_root *xv,
				       int *meta_add, int *credits)
{
	int ret = 0, index, ref_blocks = 0;
	u32 p_cluster, num_clusters;
	u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters);
	struct ocfs2_refcount_block *rb;
	struct ocfs2_refcount_rec rec;
	struct buffer_head *ref_leaf_bh = NULL;

	while (cpos < clusters) {
		ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
					       &num_clusters, &xv->xr_list,
					       NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		cpos += num_clusters;

		while (num_clusters) {
			ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh,
						     p_cluster, num_clusters,
						     &rec, &index,
						     &ref_leaf_bh);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			BUG_ON(!rec.r_refcount);

			rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;

			/*
			 * We really don't know whether the other clusters are
			 * in this refcount block or not, so just assume the
			 * worst case: all of them live in this block and each
			 * one splits a refcount rec, so in total we need
			 * clusters * 2 new refcount recs.
			 */
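
			/*
			 * Illustrative arithmetic (numbers are made up):
			 * with rl_used 100, rl_count 126 and clusters 20,
			 * the worst case needs 100 + 40 = 140 > 126
			 * records, so we budget one extra refcount block.
			 */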
			if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
			    le16_to_cpu(rb->rf_records.rl_count))
				ref_blocks++;

			*credits += 1;
			brelse(ref_leaf_bh);
			ref_leaf_bh = NULL;

			if (num_clusters <= le32_to_cpu(rec.r_clusters))
				break;
			else
				num_clusters -= le32_to_cpu(rec.r_clusters);
			p_cluster += num_clusters;
		}
	}

	*meta_add += ref_blocks;
	if (!ref_blocks)
		goto out;

	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
		*credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
	else {
		struct ocfs2_extent_tree et;

		ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh);
		*credits += ocfs2_calc_extend_credits(inode->i_sb,
						      et.et_root_el,
						      ref_blocks);
	}

out:
	brelse(ref_leaf_bh);
	return ret;
}
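
/*
 * Illustrative use (a hypothetical sketch, not a caller from this file):
 * before truncating a refcounted xattr value, the delete path would fold
 * this function's output into its reservation, roughly:
 *
 *	int meta_add = 0, credits = 0;
 *
 *	ret = ocfs2_refcounted_xattr_delete_need(inode, ref_ci, ref_root_bh,
 *						 xv, &meta_add, &credits);
 *	if (!ret && meta_add)
 *		ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add,
 *							&meta_ac);
 */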

/*
 * Do CoW for xattr.
 */
int ocfs2_refcount_cow_xattr(struct inode *inode,
			     struct ocfs2_dinode *di,
			     struct ocfs2_xattr_value_buf *vb,
			     struct ocfs2_refcount_tree *ref_tree,
			     struct buffer_head *ref_root_bh,
			     u32 cpos, u32 write_len,
			     struct ocfs2_post_refcount *post)
{
	int ret;
	struct ocfs2_xattr_value_root *xv = vb->vb_xv;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_cow_context *context = NULL;
	u32 cow_start, cow_len;

	BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));

	ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list,
					      cpos, write_len, UINT_MAX,
					      &cow_start, &cow_len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	BUG_ON(cow_len == 0);

	context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
	if (!context) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	context->inode = inode;
	context->cow_start = cow_start;
	context->cow_len = cow_len;
	context->ref_tree = ref_tree;
	context->ref_root_bh = ref_root_bh;
	context->cow_object = xv;

	context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd;
	/* We need the extra credits for duplicate_clusters by jbd. */
	context->extra_credits =
		ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len;
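	/*
	 * Illustrative numbers (assuming 4KB blocks and 32KB clusters,
	 * not values from this file): ocfs2_clusters_to_blocks(sb, 1)
	 * is then 8, so CoWing a 3-cluster xattr value adds 24 credits,
	 * one per data block dirtied through the journal by
	 * ocfs2_duplicate_clusters_by_jbd.
	 */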
	context->get_clusters = ocfs2_xattr_value_get_clusters;
	context->post_refcount = post;

	ocfs2_init_xattr_value_extent_tree(&context->data_et,
					   INODE_CACHE(inode), vb);

	ret = ocfs2_replace_cow(context);
	if (ret)
		mlog_errno(ret);

out:
	kfree(context);
	return ret;
}

/*
 * Insert a new extent into the refcount tree and mark an extent rec
 * as refcounted in the dinode tree.
 */
int ocfs2_add_refcount_flag(struct inode *inode,
			    struct ocfs2_extent_tree *data_et,
			    struct ocfs2_caching_info *ref_ci,
			    struct buffer_head *ref_root_bh,
			    u32 cpos, u32 p_cluster, u32 num_clusters,
			    struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret;
	handle_t *handle;
	int credits = 1, ref_blocks = 0;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_alloc_context *meta_ac = NULL;

	ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
					       ref_ci, ref_root_bh,
					       p_cluster, num_clusters,
					       &ref_blocks, &credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	mlog(0, "reserve new metadata %d, credits = %d\n",
	     ref_blocks, credits);

	if (ref_blocks) {
		ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
							ref_blocks, &meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_mark_extent_refcounted(inode, data_et, handle,
					   cpos, num_clusters, p_cluster,
					   meta_ac, dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
					p_cluster, num_clusters,
					meta_ac, dealloc);
	if (ret)
		mlog_errno(ret);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	return ret;
}

static int ocfs2_change_ctime(struct inode *inode,
			      struct buffer_head *di_bh)
{
	int ret;
	handle_t *handle;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
				   OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	inode->i_ctime = CURRENT_TIME;
	di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);

	ocfs2_journal_dirty(handle, di_bh);

out_commit:
	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
	return ret;
}

static int ocfs2_attach_refcount_tree(struct inode *inode,
				      struct buffer_head *di_bh)
{
	int ret, data_changed = 0;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_refcount_tree *ref_tree;
	unsigned int ext_flags;
	loff_t size;
	u32 cpos, num_clusters, clusters, p_cluster;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_extent_tree di_et;

	ocfs2_init_dealloc_ctxt(&dealloc);

	if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) {
		ret = ocfs2_create_refcount_tree(inode, di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	BUG_ON(!di->i_refcount_loc);
	ret = ocfs2_lock_refcount_tree(osb,
				       le64_to_cpu(di->i_refcount_loc), 1,
				       &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);

	size = i_size_read(inode);
	clusters = ocfs2_clusters_for_bytes(inode->i_sb, size);

	cpos = 0;
	while (cpos < clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
					 &num_clusters, &ext_flags);
		if (ret) {
			mlog_errno(ret);
			goto unlock;
		}

		if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) {
			ret = ocfs2_add_refcount_flag(inode, &di_et,
						      &ref_tree->rf_ci,
						      ref_root_bh, cpos,
						      p_cluster, num_clusters,
						      &dealloc);
			if (ret) {
				mlog_errno(ret);
				goto unlock;
			}

			data_changed = 1;
		}
		cpos += num_clusters;
	}

	if (data_changed) {
		ret = ocfs2_change_ctime(inode, di_bh);
		if (ret)
			mlog_errno(ret);
	}

unlock:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);

	if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) {
		ocfs2_schedule_truncate_log_flush(osb, 1);
		ocfs2_run_deallocs(osb, &dealloc);
	}
out:
	/*
	 * Empty the extent map so that we may get the right extent
	 * record from the disk.
	 */
	ocfs2_extent_map_trunc(inode, 0);

	return ret;
}
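
/*
 * Ordering note (a sketch of the expected call order; the actual reflink
 * entry points live outside this excerpt): the source inode has its
 * refcount tree attached and all existing extents flagged as refcounted
 * before the target starts sharing them, roughly:
 *
 *	ret = ocfs2_attach_refcount_tree(s_inode, s_bh);
 *	if (!ret)
 *		ret = ocfs2_create_reflink_node(s_inode, s_bh,
 *						t_inode, t_bh);
 */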

static int ocfs2_add_refcounted_extent(struct inode *inode,
				       struct ocfs2_extent_tree *et,
				       struct ocfs2_caching_info *ref_ci,
				       struct buffer_head *ref_root_bh,
				       u32 cpos, u32 p_cluster, u32 num_clusters,
				       unsigned int ext_flags,
				       struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret;
	handle_t *handle;
	int credits = 0;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_alloc_context *meta_ac = NULL;

	ret = ocfs2_lock_refcount_allocators(inode->i_sb,
					     p_cluster, num_clusters,
					     et, ref_ci,
					     ref_root_bh, &meta_ac,
					     NULL, &credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}
	/* ocfs2_insert_extent() takes a CPU-order u64, so no cpu_to_le64. */
	ret = ocfs2_insert_extent(handle, et, cpos,
				  ocfs2_clusters_to_blocks(inode->i_sb,
							   p_cluster),
				  num_clusters, ext_flags, meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
					p_cluster, num_clusters,
					meta_ac, dealloc);
	if (ret)
		mlog_errno(ret);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	return ret;
}

static int ocfs2_duplicate_extent_list(struct inode *s_inode,
				       struct inode *t_inode,
				       struct buffer_head *t_bh,
				       struct ocfs2_caching_info *ref_ci,
				       struct buffer_head *ref_root_bh,
				       struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret = 0;
	u32 p_cluster, num_clusters, clusters, cpos;
	loff_t size;
	unsigned int ext_flags;
	struct ocfs2_extent_tree et;

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh);

	size = i_size_read(s_inode);
	clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size);

	cpos = 0;
	while (cpos < clusters) {
		ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
					 &num_clusters, &ext_flags);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (p_cluster) {
			ret = ocfs2_add_refcounted_extent(t_inode, &et,
							  ref_ci, ref_root_bh,
							  cpos, p_cluster,
							  num_clusters,
							  ext_flags,
							  dealloc);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		}

		cpos += num_clusters;
	}

out:
	return ret;
}

/*
 * Change the new file's attributes to match the source.
 *
 * reflink creates a snapshot of a file; that means the attributes
 * must be identical except for three exceptions - nlink, ino, and ctime.
 */
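/*
 * Concretely (see the field copies below): size, clusters, xattr inline
 * size, attr, dyn_features, uid, gid, mode and mtime come from the
 * source; ctime is set fresh on the target; nlink and ino are untouched.
 */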
static int ocfs2_complete_reflink(struct inode *s_inode,
				  struct buffer_head *s_bh,
				  struct inode *t_inode,
				  struct buffer_head *t_bh)
{
	int ret;
	handle_t *handle;
	struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data;
	loff_t size = i_size_read(s_inode);

	handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb),
				   OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		return ret;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	spin_lock(&OCFS2_I(t_inode)->ip_lock);
	OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters;
	OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr;
	OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features;
	spin_unlock(&OCFS2_I(t_inode)->ip_lock);

	i_size_write(t_inode, size);

	di->i_xattr_inline_size = s_di->i_xattr_inline_size;
	di->i_clusters = s_di->i_clusters;
	di->i_size = s_di->i_size;
	di->i_dyn_features = s_di->i_dyn_features;
	di->i_attr = s_di->i_attr;
	di->i_uid = s_di->i_uid;
	di->i_gid = s_di->i_gid;
	di->i_mode = s_di->i_mode;

	/*
	 * Update the times: we want mtime to appear identical to the
	 * source, while ctime records the moment of the reflink.
	 */
	t_inode->i_ctime = CURRENT_TIME;
	di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec);
	di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec);

	t_inode->i_mtime = s_inode->i_mtime;
	di->i_mtime = s_di->i_mtime;
	di->i_mtime_nsec = s_di->i_mtime_nsec;

	ocfs2_journal_dirty(handle, t_bh);

out_commit:
	ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle);
	return ret;
}

static int ocfs2_create_reflink_node(struct inode *s_inode,
				     struct buffer_head *s_bh,
				     struct inode *t_inode,
				     struct buffer_head *t_bh)
{
	int ret;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
	struct ocfs2_refcount_block *rb;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
	struct ocfs2_refcount_tree *ref_tree;

	ocfs2_init_dealloc_ctxt(&dealloc);

	ret = ocfs2_set_refcount_tree(t_inode, t_bh,
				      le64_to_cpu(di->i_refcount_loc));
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
				       1, &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}
	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;

	ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
					  &ref_tree->rf_ci, ref_root_bh,
					  &dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock_refcount;
	}

	ret = ocfs2_complete_reflink(s_inode, s_bh, t_inode, t_bh);
	if (ret)
		mlog_errno(ret);

out_unlock_refcount:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);
out:
	if (ocfs2_dealloc_has_cluster(&dealloc)) {
		ocfs2_schedule_truncate_log_flush(osb, 1);
		ocfs2_run_deallocs(osb, &dealloc);
	}

	return ret;
}
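
/*
 * Putting ocfs2_create_reflink_node() together (a reading aid, not new
 * behaviour): with both dinode buffers in hand, the sequence is
 *
 *	ocfs2_set_refcount_tree()     - point the target at the source's
 *					refcount tree
 *	ocfs2_lock_refcount_tree()    - take the now-shared tree write-locked
 *	ocfs2_duplicate_extent_list() - insert every source extent into the
 *					target and bump each refcount
 *	ocfs2_complete_reflink()      - copy the attributes over
 *
 * followed by unlock and, if anything was queued for freeing, a truncate
 * log flush plus ocfs2_run_deallocs().
 */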