xfs_bmap_btree.c

  1. /*
  2. * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include "xfs.h"
  19. #include "xfs_fs.h"
  20. #include "xfs_types.h"
  21. #include "xfs_bit.h"
  22. #include "xfs_log.h"
  23. #include "xfs_inum.h"
  24. #include "xfs_trans.h"
  25. #include "xfs_sb.h"
  26. #include "xfs_ag.h"
  27. #include "xfs_dir2.h"
  28. #include "xfs_dmapi.h"
  29. #include "xfs_mount.h"
  30. #include "xfs_bmap_btree.h"
  31. #include "xfs_alloc_btree.h"
  32. #include "xfs_ialloc_btree.h"
  33. #include "xfs_dir2_sf.h"
  34. #include "xfs_attr_sf.h"
  35. #include "xfs_dinode.h"
  36. #include "xfs_inode.h"
  37. #include "xfs_inode_item.h"
  38. #include "xfs_alloc.h"
  39. #include "xfs_btree.h"
  40. #include "xfs_btree_trace.h"
  41. #include "xfs_ialloc.h"
  42. #include "xfs_itable.h"
  43. #include "xfs_bmap.h"
  44. #include "xfs_error.h"
  45. #include "xfs_quota.h"
  46. /*
  47. * Prototypes for internal btree functions.
  48. */
  49. STATIC void xfs_bmbt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
  50. STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
  51. #undef EXIT
  52. #define ENTRY XBT_ENTRY
  53. #define ERROR XBT_ERROR
  54. #define EXIT XBT_EXIT
  55. /*
  56. * Keep the XFS_BMBT_TRACE_ names around for now until all code using them
  57. * is converted to be generic and thus switches to the XFS_BTREE_TRACE_ names.
  58. */
  59. #define XFS_BMBT_TRACE_ARGBI(c,b,i) \
  60. XFS_BTREE_TRACE_ARGBI(c,b,i)
  61. #define XFS_BMBT_TRACE_ARGBII(c,b,i,j) \
  62. XFS_BTREE_TRACE_ARGBII(c,b,i,j)
  63. #define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) \
  64. XFS_BTREE_TRACE_ARGFFFI(c,o,b,i,j)
  65. #define XFS_BMBT_TRACE_ARGI(c,i) \
  66. XFS_BTREE_TRACE_ARGI(c,i)
  67. #define XFS_BMBT_TRACE_ARGIFK(c,i,f,s) \
  68. XFS_BTREE_TRACE_ARGIPK(c,i,(union xfs_btree_ptr)f,s)
  69. #define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \
  70. XFS_BTREE_TRACE_ARGIPR(c,i, \
  71. (union xfs_btree_ptr)f, (union xfs_btree_rec *)r)
  72. #define XFS_BMBT_TRACE_ARGIK(c,i,k) \
  73. XFS_BTREE_TRACE_ARGIK(c,i,(union xfs_btree_key *)k)
  74. #define XFS_BMBT_TRACE_CURSOR(c,s) \
  75. XFS_BTREE_TRACE_CURSOR(c,s)
  76. /*
  77. * Internal functions.
  78. */
  79. /*
  80. * Delete record pointed to by cur/level.
  81. */
  82. STATIC int /* error */
  83. xfs_bmbt_delrec(
  84. xfs_btree_cur_t *cur,
  85. int level,
  86. int *stat) /* success/failure */
  87. {
  88. xfs_bmbt_block_t *block; /* bmap btree block */
  89. xfs_fsblock_t bno; /* fs-relative block number */
  90. xfs_buf_t *bp; /* buffer for block */
  91. int error; /* error return value */
  92. int i; /* loop counter */
  93. int j; /* temp state */
  94. xfs_bmbt_key_t key; /* bmap btree key */
  95. xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */
  96. xfs_fsblock_t lbno; /* left sibling block number */
  97. xfs_buf_t *lbp; /* left buffer pointer */
  98. xfs_bmbt_block_t *left; /* left btree block */
  99. xfs_bmbt_key_t *lkp; /* left btree key */
  100. xfs_bmbt_ptr_t *lpp; /* left address pointer */
  101. int lrecs=0; /* left record count */
  102. xfs_bmbt_rec_t *lrp; /* left record pointer */
  103. xfs_mount_t *mp; /* file system mount point */
  104. xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */
  105. int ptr; /* key/record index */
  106. xfs_fsblock_t rbno; /* right sibling block number */
  107. xfs_buf_t *rbp; /* right buffer pointer */
  108. xfs_bmbt_block_t *right; /* right btree block */
  109. xfs_bmbt_key_t *rkp; /* right btree key */
  110. xfs_bmbt_rec_t *rp; /* pointer to bmap btree rec */
  111. xfs_bmbt_ptr_t *rpp; /* right address pointer */
  112. xfs_bmbt_block_t *rrblock; /* right-right btree block */
  113. xfs_buf_t *rrbp; /* right-right buffer pointer */
  114. int rrecs=0; /* right record count */
  115. xfs_bmbt_rec_t *rrp; /* right record pointer */
  116. xfs_btree_cur_t *tcur; /* temporary btree cursor */
  117. int numrecs; /* temporary numrec count */
  118. int numlrecs, numrrecs;
  119. XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
  120. XFS_BMBT_TRACE_ARGI(cur, level);
  121. ptr = cur->bc_ptrs[level];
  122. tcur = NULL;
  123. if (ptr == 0) {
  124. XFS_BMBT_TRACE_CURSOR(cur, EXIT);
  125. *stat = 0;
  126. return 0;
  127. }
  128. block = xfs_bmbt_get_block(cur, level, &bp);
  129. numrecs = be16_to_cpu(block->bb_numrecs);
  130. #ifdef DEBUG
  131. if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
  132. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  133. goto error0;
  134. }
  135. #endif
  136. if (ptr > numrecs) {
  137. XFS_BMBT_TRACE_CURSOR(cur, EXIT);
  138. *stat = 0;
  139. return 0;
  140. }
  141. XFS_STATS_INC(xs_bmbt_delrec);
  142. if (level > 0) {
  143. kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
  144. pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
  145. #ifdef DEBUG
  146. for (i = ptr; i < numrecs; i++) {
  147. if ((error = xfs_btree_check_lptr_disk(cur, pp[i], level))) {
  148. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  149. goto error0;
  150. }
  151. }
  152. #endif
  153. if (ptr < numrecs) {
  154. memmove(&kp[ptr - 1], &kp[ptr],
  155. (numrecs - ptr) * sizeof(*kp));
  156. memmove(&pp[ptr - 1], &pp[ptr],
  157. (numrecs - ptr) * sizeof(*pp));
  158. xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs - 1);
  159. xfs_bmbt_log_keys(cur, bp, ptr, numrecs - 1);
  160. }
  161. } else {
  162. rp = XFS_BMAP_REC_IADDR(block, 1, cur);
  163. if (ptr < numrecs) {
  164. memmove(&rp[ptr - 1], &rp[ptr],
  165. (numrecs - ptr) * sizeof(*rp));
  166. xfs_bmbt_log_recs(cur, bp, ptr, numrecs - 1);
  167. }
  168. if (ptr == 1) {
  169. key.br_startoff =
  170. cpu_to_be64(xfs_bmbt_disk_get_startoff(rp));
  171. kp = &key;
  172. }
  173. }
  174. numrecs--;
  175. block->bb_numrecs = cpu_to_be16(numrecs);
  176. xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS);
  177. /*
  178. * We're at the root level.
  179. * First, shrink the root block in-memory.
  180. * Try to get rid of the next level down.
  181. * If we can't then there's nothing left to do.
  182. */
  183. if (level == cur->bc_nlevels - 1) {
  184. xfs_iroot_realloc(cur->bc_private.b.ip, -1,
  185. cur->bc_private.b.whichfork);
  186. if ((error = xfs_btree_kill_iroot(cur))) {
  187. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  188. goto error0;
  189. }
  190. if (level > 0 && (error = xfs_btree_decrement(cur, level, &j))) {
  191. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  192. goto error0;
  193. }
  194. XFS_BMBT_TRACE_CURSOR(cur, EXIT);
  195. *stat = 1;
  196. return 0;
  197. }
  198. if (ptr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)kp, level + 1))) {
  199. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  200. goto error0;
  201. }
  202. if (numrecs >= XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
  203. if (level > 0 && (error = xfs_btree_decrement(cur, level, &j))) {
  204. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  205. goto error0;
  206. }
  207. XFS_BMBT_TRACE_CURSOR(cur, EXIT);
  208. *stat = 1;
  209. return 0;
  210. }
  211. rbno = be64_to_cpu(block->bb_rightsib);
  212. lbno = be64_to_cpu(block->bb_leftsib);
  213. /*
  214. * One child of root, need to get a chance to copy its contents
  215. * into the root and delete it. Can't go up to next level,
  216. * there's nothing to delete there.
  217. */
  218. if (lbno == NULLFSBLOCK && rbno == NULLFSBLOCK &&
  219. level == cur->bc_nlevels - 2) {
  220. if ((error = xfs_btree_kill_iroot(cur))) {
  221. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  222. goto error0;
  223. }
  224. if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) {
  225. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  226. goto error0;
  227. }
  228. XFS_BMBT_TRACE_CURSOR(cur, EXIT);
  229. *stat = 1;
  230. return 0;
  231. }
  232. ASSERT(rbno != NULLFSBLOCK || lbno != NULLFSBLOCK);
  233. if ((error = xfs_btree_dup_cursor(cur, &tcur))) {
  234. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  235. goto error0;
  236. }
  237. bno = NULLFSBLOCK;
  238. if (rbno != NULLFSBLOCK) {
  239. i = xfs_btree_lastrec(tcur, level);
  240. XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
  241. if ((error = xfs_btree_increment(tcur, level, &i))) {
  242. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  243. goto error0;
  244. }
  245. XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
  246. i = xfs_btree_lastrec(tcur, level);
  247. XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
  248. rbp = tcur->bc_bufs[level];
  249. right = XFS_BUF_TO_BMBT_BLOCK(rbp);
  250. #ifdef DEBUG
  251. if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
  252. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  253. goto error0;
  254. }
  255. #endif
  256. bno = be64_to_cpu(right->bb_leftsib);
  257. if (be16_to_cpu(right->bb_numrecs) - 1 >=
  258. XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
  259. if ((error = xfs_btree_lshift(tcur, level, &i))) {
  260. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  261. goto error0;
  262. }
  263. if (i) {
  264. ASSERT(be16_to_cpu(block->bb_numrecs) >=
  265. XFS_BMAP_BLOCK_IMINRECS(level, tcur));
  266. xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
  267. tcur = NULL;
  268. if (level > 0) {
  269. if ((error = xfs_btree_decrement(cur,
  270. level, &i))) {
  271. XFS_BMBT_TRACE_CURSOR(cur,
  272. ERROR);
  273. goto error0;
  274. }
  275. }
  276. XFS_BMBT_TRACE_CURSOR(cur, EXIT);
  277. *stat = 1;
  278. return 0;
  279. }
  280. }
  281. rrecs = be16_to_cpu(right->bb_numrecs);
  282. if (lbno != NULLFSBLOCK) {
  283. i = xfs_btree_firstrec(tcur, level);
  284. XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
  285. if ((error = xfs_btree_decrement(tcur, level, &i))) {
  286. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  287. goto error0;
  288. }
  289. XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
  290. }
  291. }
  292. if (lbno != NULLFSBLOCK) {
  293. i = xfs_btree_firstrec(tcur, level);
  294. XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
  295. /*
  296. * decrement to last in block
  297. */
  298. if ((error = xfs_btree_decrement(tcur, level, &i))) {
  299. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  300. goto error0;
  301. }
  302. i = xfs_btree_firstrec(tcur, level);
  303. XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
  304. lbp = tcur->bc_bufs[level];
  305. left = XFS_BUF_TO_BMBT_BLOCK(lbp);
  306. #ifdef DEBUG
  307. if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
  308. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  309. goto error0;
  310. }
  311. #endif
  312. bno = be64_to_cpu(left->bb_rightsib);
  313. if (be16_to_cpu(left->bb_numrecs) - 1 >=
  314. XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
  315. if ((error = xfs_btree_rshift(tcur, level, &i))) {
  316. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  317. goto error0;
  318. }
  319. if (i) {
  320. ASSERT(be16_to_cpu(block->bb_numrecs) >=
  321. XFS_BMAP_BLOCK_IMINRECS(level, tcur));
  322. xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
  323. tcur = NULL;
  324. if (level == 0)
  325. cur->bc_ptrs[0]++;
  326. XFS_BMBT_TRACE_CURSOR(cur, EXIT);
  327. *stat = 1;
  328. return 0;
  329. }
  330. }
  331. lrecs = be16_to_cpu(left->bb_numrecs);
  332. }
  333. xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
  334. tcur = NULL;
  335. mp = cur->bc_mp;
  336. ASSERT(bno != NULLFSBLOCK);
  337. if (lbno != NULLFSBLOCK &&
  338. lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
  339. rbno = bno;
  340. right = block;
  341. rbp = bp;
  342. if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, lbno, 0, &lbp,
  343. XFS_BMAP_BTREE_REF))) {
  344. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  345. goto error0;
  346. }
  347. left = XFS_BUF_TO_BMBT_BLOCK(lbp);
  348. if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
  349. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  350. goto error0;
  351. }
  352. } else if (rbno != NULLFSBLOCK &&
  353. rrecs + be16_to_cpu(block->bb_numrecs) <=
  354. XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
  355. lbno = bno;
  356. left = block;
  357. lbp = bp;
  358. if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, rbno, 0, &rbp,
  359. XFS_BMAP_BTREE_REF))) {
  360. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  361. goto error0;
  362. }
  363. right = XFS_BUF_TO_BMBT_BLOCK(rbp);
  364. if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
  365. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  366. goto error0;
  367. }
  368. lrecs = be16_to_cpu(left->bb_numrecs);
  369. } else {
  370. if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) {
  371. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  372. goto error0;
  373. }
  374. XFS_BMBT_TRACE_CURSOR(cur, EXIT);
  375. *stat = 1;
  376. return 0;
  377. }
  378. numlrecs = be16_to_cpu(left->bb_numrecs);
  379. numrrecs = be16_to_cpu(right->bb_numrecs);
  380. if (level > 0) {
  381. lkp = XFS_BMAP_KEY_IADDR(left, numlrecs + 1, cur);
  382. lpp = XFS_BMAP_PTR_IADDR(left, numlrecs + 1, cur);
  383. rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
  384. rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
  385. #ifdef DEBUG
  386. for (i = 0; i < numrrecs; i++) {
  387. if ((error = xfs_btree_check_lptr_disk(cur, rpp[i], level))) {
  388. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  389. goto error0;
  390. }
  391. }
  392. #endif
  393. memcpy(lkp, rkp, numrrecs * sizeof(*lkp));
  394. memcpy(lpp, rpp, numrrecs * sizeof(*lpp));
  395. xfs_bmbt_log_keys(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
  396. xfs_bmbt_log_ptrs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
  397. } else {
  398. lrp = XFS_BMAP_REC_IADDR(left, numlrecs + 1, cur);
  399. rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
  400. memcpy(lrp, rrp, numrrecs * sizeof(*lrp));
  401. xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
  402. }
  403. be16_add_cpu(&left->bb_numrecs, numrrecs);
  404. left->bb_rightsib = right->bb_rightsib;
  405. xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS);
  406. if (be64_to_cpu(left->bb_rightsib) != NULLDFSBNO) {
  407. if ((error = xfs_btree_read_bufl(mp, cur->bc_tp,
  408. be64_to_cpu(left->bb_rightsib),
  409. 0, &rrbp, XFS_BMAP_BTREE_REF))) {
  410. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  411. goto error0;
  412. }
  413. rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp);
  414. if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) {
  415. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  416. goto error0;
  417. }
  418. rrblock->bb_leftsib = cpu_to_be64(lbno);
  419. xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB);
  420. }
  421. xfs_bmap_add_free(XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(rbp)), 1,
  422. cur->bc_private.b.flist, mp);
  423. cur->bc_private.b.ip->i_d.di_nblocks--;
  424. xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
  425. XFS_TRANS_MOD_DQUOT_BYINO(mp, cur->bc_tp, cur->bc_private.b.ip,
  426. XFS_TRANS_DQ_BCOUNT, -1L);
  427. xfs_trans_binval(cur->bc_tp, rbp);
  428. if (bp != lbp) {
  429. cur->bc_bufs[level] = lbp;
  430. cur->bc_ptrs[level] += lrecs;
  431. cur->bc_ra[level] = 0;
  432. } else if ((error = xfs_btree_increment(cur, level + 1, &i))) {
  433. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  434. goto error0;
  435. }
  436. if (level > 0)
  437. cur->bc_ptrs[level]--;
  438. XFS_BMBT_TRACE_CURSOR(cur, EXIT);
  439. *stat = 2;
  440. return 0;
  441. error0:
  442. if (tcur)
  443. xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
  444. return error;
  445. }
  446. /*
  447. * Log key values from the btree block.
  448. */
  449. STATIC void
  450. xfs_bmbt_log_keys(
  451. xfs_btree_cur_t *cur,
  452. xfs_buf_t *bp,
  453. int kfirst,
  454. int klast)
  455. {
  456. xfs_trans_t *tp;
  457. XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
  458. XFS_BMBT_TRACE_ARGBII(cur, bp, kfirst, klast);
  459. tp = cur->bc_tp;
  460. if (bp) {
  461. xfs_bmbt_block_t *block;
  462. int first;
  463. xfs_bmbt_key_t *kp;
  464. int last;
  465. block = XFS_BUF_TO_BMBT_BLOCK(bp);
  466. kp = XFS_BMAP_KEY_DADDR(block, 1, cur);
  467. first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block);
  468. last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block);
  469. xfs_trans_log_buf(tp, bp, first, last);
  470. } else {
  471. xfs_inode_t *ip;
  472. ip = cur->bc_private.b.ip;
  473. xfs_trans_log_inode(tp, ip,
  474. XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
  475. }
  476. XFS_BMBT_TRACE_CURSOR(cur, EXIT);
  477. }
  478. /*
  479. * Log pointer values from the btree block.
  480. */
  481. STATIC void
  482. xfs_bmbt_log_ptrs(
  483. xfs_btree_cur_t *cur,
  484. xfs_buf_t *bp,
  485. int pfirst,
  486. int plast)
  487. {
  488. xfs_trans_t *tp;
  489. XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
  490. XFS_BMBT_TRACE_ARGBII(cur, bp, pfirst, plast);
  491. tp = cur->bc_tp;
  492. if (bp) {
  493. xfs_bmbt_block_t *block;
  494. int first;
  495. int last;
  496. xfs_bmbt_ptr_t *pp;
  497. block = XFS_BUF_TO_BMBT_BLOCK(bp);
  498. pp = XFS_BMAP_PTR_DADDR(block, 1, cur);
  499. first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block);
  500. last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block);
  501. xfs_trans_log_buf(tp, bp, first, last);
  502. } else {
  503. xfs_inode_t *ip;
  504. ip = cur->bc_private.b.ip;
  505. xfs_trans_log_inode(tp, ip,
  506. XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
  507. }
  508. XFS_BMBT_TRACE_CURSOR(cur, EXIT);
  509. }
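/*
 * Illustrative sketch (not part of the original file): both logging helpers
 * above convert a 1-based range of in-block entries into the inclusive byte
 * range handed to xfs_trans_log_buf().  A minimal standalone restatement,
 * assuming only C99 and that entry 1 starts array_off bytes into the block:
 */
#include <stddef.h>

struct byte_range_sketch {
	int	first;		/* first dirty byte, relative to block start */
	int	last;		/* last dirty byte, inclusive */
};

static struct byte_range_sketch
log_range_sketch(
	size_t	array_off,	/* offset of entry 1 within the block */
	size_t	entry_size,	/* sizeof one key/ptr/record */
	int	first_idx,	/* 1-based index of first entry to log */
	int	last_idx)	/* 1-based index of last entry to log */
{
	struct byte_range_sketch	r;

	/* matches (xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block above */
	r.first = (int)(array_off + (size_t)(first_idx - 1) * entry_size);
	/* matches ((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block:
	 * the last byte of the last logged entry */
	r.last = (int)(array_off + (size_t)last_idx * entry_size - 1);
	return r;
}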
  510. /*
  511. * Determine the extent state.
  512. */
  513. /* ARGSUSED */
  514. STATIC xfs_exntst_t
  515. xfs_extent_state(
  516. xfs_filblks_t blks,
  517. int extent_flag)
  518. {
  519. if (extent_flag) {
  520. ASSERT(blks != 0); /* saved for DMIG */
  521. return XFS_EXT_UNWRITTEN;
  522. }
  523. return XFS_EXT_NORM;
  524. }
  525. /*
  526. * Convert on-disk form of btree root to in-memory form.
  527. */
  528. void
  529. xfs_bmdr_to_bmbt(
  530. xfs_bmdr_block_t *dblock,
  531. int dblocklen,
  532. xfs_bmbt_block_t *rblock,
  533. int rblocklen)
  534. {
  535. int dmxr;
  536. xfs_bmbt_key_t *fkp;
  537. __be64 *fpp;
  538. xfs_bmbt_key_t *tkp;
  539. __be64 *tpp;
  540. rblock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
  541. rblock->bb_level = dblock->bb_level;
  542. ASSERT(be16_to_cpu(rblock->bb_level) > 0);
  543. rblock->bb_numrecs = dblock->bb_numrecs;
  544. rblock->bb_leftsib = cpu_to_be64(NULLDFSBNO);
  545. rblock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
  546. dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
  547. fkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
  548. tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
  549. fpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
  550. tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
  551. dmxr = be16_to_cpu(dblock->bb_numrecs);
  552. memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
  553. memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
  554. }
  555. /*
  556. * Delete the record pointed to by cur.
  557. */
  558. int /* error */
  559. xfs_bmbt_delete(
  560. xfs_btree_cur_t *cur,
  561. int *stat) /* success/failure */
  562. {
  563. int error; /* error return value */
  564. int i;
  565. int level;
  566. XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
  567. for (level = 0, i = 2; i == 2; level++) {
  568. if ((error = xfs_bmbt_delrec(cur, level, &i))) {
  569. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  570. return error;
  571. }
  572. }
  573. if (i == 0) {
  574. for (level = 1; level < cur->bc_nlevels; level++) {
  575. if (cur->bc_ptrs[level] == 0) {
  576. if ((error = xfs_btree_decrement(cur, level,
  577. &i))) {
  578. XFS_BMBT_TRACE_CURSOR(cur, ERROR);
  579. return error;
  580. }
  581. break;
  582. }
  583. }
  584. }
  585. XFS_BMBT_TRACE_CURSOR(cur, EXIT);
  586. *stat = i;
  587. return 0;
  588. }
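/*
 * Illustrative note (not part of the original file): the loop above relies on
 * the *stat convention of xfs_bmbt_delrec(), restated here as a sketch enum.
 * A value of 2 means the record was removed and the block was merged with a
 * sibling, so the now-stale separator one level up must be removed as well,
 * which is why the loop repeats while i == 2.
 */
enum bmbt_delrec_stat_sketch {
	BMBT_DELREC_NONE_SKETCH	  = 0,	/* nothing deleted (empty cursor slot) */
	BMBT_DELREC_DONE_SKETCH	  = 1,	/* record deleted, tree shape unchanged */
	BMBT_DELREC_JOINED_SKETCH = 2,	/* deleted and joined; repeat one level up */
};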
  589. /*
  590. * Convert a compressed bmap extent record to an uncompressed form.
  591. * This code must be in sync with the routines xfs_bmbt_get_startoff,
  592. * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
  593. */
  594. STATIC_INLINE void
  595. __xfs_bmbt_get_all(
  596. __uint64_t l0,
  597. __uint64_t l1,
  598. xfs_bmbt_irec_t *s)
  599. {
  600. int ext_flag;
  601. xfs_exntst_t st;
  602. ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
  603. s->br_startoff = ((xfs_fileoff_t)l0 &
  604. XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
  605. #if XFS_BIG_BLKNOS
  606. s->br_startblock = (((xfs_fsblock_t)l0 & XFS_MASK64LO(9)) << 43) |
  607. (((xfs_fsblock_t)l1) >> 21);
  608. #else
  609. #ifdef DEBUG
  610. {
  611. xfs_dfsbno_t b;
  612. b = (((xfs_dfsbno_t)l0 & XFS_MASK64LO(9)) << 43) |
  613. (((xfs_dfsbno_t)l1) >> 21);
  614. ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
  615. s->br_startblock = (xfs_fsblock_t)b;
  616. }
  617. #else /* !DEBUG */
  618. s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21);
  619. #endif /* DEBUG */
  620. #endif /* XFS_BIG_BLKNOS */
  621. s->br_blockcount = (xfs_filblks_t)(l1 & XFS_MASK64LO(21));
  622. /* This is xfs_extent_state() in-line */
  623. if (ext_flag) {
  624. ASSERT(s->br_blockcount != 0); /* saved for DMIG */
  625. st = XFS_EXT_UNWRITTEN;
  626. } else
  627. st = XFS_EXT_NORM;
  628. s->br_state = st;
  629. }
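/*
 * Illustrative sketch (not part of the original file): the packed 128-bit
 * extent record decoded above, restated with standalone C99 types.  Layout,
 * most significant bit first:
 *
 *	l0:  1 bit  extent flag (unwritten)
 *	    54 bits startoff
 *	     9 bits startblock (high part)
 *	l1: 43 bits startblock (low part)
 *	    21 bits blockcount
 */
#include <stdint.h>

struct bmbt_irec_sketch {
	uint64_t	startoff;	/* file offset, in blocks */
	uint64_t	startblock;	/* filesystem block number */
	uint64_t	blockcount;	/* extent length, in blocks */
	int		unwritten;	/* extent flag */
};

static void
bmbt_unpack_sketch(
	uint64_t		l0,
	uint64_t		l1,
	struct bmbt_irec_sketch	*s)
{
	s->unwritten = (int)(l0 >> 63);				/* top bit of l0 */
	s->startoff = (l0 & ((1ULL << 63) - 1)) >> 9;		/* next 54 bits */
	s->startblock = ((l0 & ((1ULL << 9) - 1)) << 43) |	/* low 9 bits of l0 */
			(l1 >> 21);				/* high 43 bits of l1 */
	s->blockcount = l1 & ((1ULL << 21) - 1);		/* low 21 bits of l1 */
}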
  630. void
  631. xfs_bmbt_get_all(
  632. xfs_bmbt_rec_host_t *r,
  633. xfs_bmbt_irec_t *s)
  634. {
  635. __xfs_bmbt_get_all(r->l0, r->l1, s);
  636. }
  637. /*
  638. * Get the block pointer for the given level of the cursor.
  639. * Fill in the buffer pointer, if applicable.
  640. */
  641. xfs_bmbt_block_t *
  642. xfs_bmbt_get_block(
  643. xfs_btree_cur_t *cur,
  644. int level,
  645. xfs_buf_t **bpp)
  646. {
  647. xfs_ifork_t *ifp;
  648. xfs_bmbt_block_t *rval;
  649. if (level < cur->bc_nlevels - 1) {
  650. *bpp = cur->bc_bufs[level];
  651. rval = XFS_BUF_TO_BMBT_BLOCK(*bpp);
  652. } else {
  653. *bpp = NULL;
  654. ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
  655. cur->bc_private.b.whichfork);
  656. rval = ifp->if_broot;
  657. }
  658. return rval;
  659. }
  660. /*
  661. * Extract the blockcount field from an in memory bmap extent record.
  662. */
  663. xfs_filblks_t
  664. xfs_bmbt_get_blockcount(
  665. xfs_bmbt_rec_host_t *r)
  666. {
  667. return (xfs_filblks_t)(r->l1 & XFS_MASK64LO(21));
  668. }
  669. /*
  670. * Extract the startblock field from an in memory bmap extent record.
  671. */
  672. xfs_fsblock_t
  673. xfs_bmbt_get_startblock(
  674. xfs_bmbt_rec_host_t *r)
  675. {
  676. #if XFS_BIG_BLKNOS
  677. return (((xfs_fsblock_t)r->l0 & XFS_MASK64LO(9)) << 43) |
  678. (((xfs_fsblock_t)r->l1) >> 21);
  679. #else
  680. #ifdef DEBUG
  681. xfs_dfsbno_t b;
  682. b = (((xfs_dfsbno_t)r->l0 & XFS_MASK64LO(9)) << 43) |
  683. (((xfs_dfsbno_t)r->l1) >> 21);
  684. ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
  685. return (xfs_fsblock_t)b;
  686. #else /* !DEBUG */
  687. return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21);
  688. #endif /* DEBUG */
  689. #endif /* XFS_BIG_BLKNOS */
  690. }
  691. /*
  692. * Extract the startoff field from an in memory bmap extent record.
  693. */
  694. xfs_fileoff_t
  695. xfs_bmbt_get_startoff(
  696. xfs_bmbt_rec_host_t *r)
  697. {
  698. return ((xfs_fileoff_t)r->l0 &
  699. XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
  700. }
  701. xfs_exntst_t
  702. xfs_bmbt_get_state(
  703. xfs_bmbt_rec_host_t *r)
  704. {
  705. int ext_flag;
  706. ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN));
  707. return xfs_extent_state(xfs_bmbt_get_blockcount(r),
  708. ext_flag);
  709. }
  710. /* Endian flipping versions of the bmbt extraction functions */
  711. void
  712. xfs_bmbt_disk_get_all(
  713. xfs_bmbt_rec_t *r,
  714. xfs_bmbt_irec_t *s)
  715. {
  716. __xfs_bmbt_get_all(be64_to_cpu(r->l0), be64_to_cpu(r->l1), s);
  717. }
  718. /*
  719. * Extract the blockcount field from an on disk bmap extent record.
  720. */
  721. xfs_filblks_t
  722. xfs_bmbt_disk_get_blockcount(
  723. xfs_bmbt_rec_t *r)
  724. {
  725. return (xfs_filblks_t)(be64_to_cpu(r->l1) & XFS_MASK64LO(21));
  726. }
  727. /*
  728. * Extract the startoff field from a disk format bmap extent record.
  729. */
  730. xfs_fileoff_t
  731. xfs_bmbt_disk_get_startoff(
  732. xfs_bmbt_rec_t *r)
  733. {
  734. return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
  735. XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
  736. }
  737. /*
  738. * Log fields from the btree block header.
  739. */
  740. void
  741. xfs_bmbt_log_block(
  742. xfs_btree_cur_t *cur,
  743. xfs_buf_t *bp,
  744. int fields)
  745. {
  746. int first;
  747. int last;
  748. xfs_trans_t *tp;
  749. static const short offsets[] = {
  750. offsetof(xfs_bmbt_block_t, bb_magic),
  751. offsetof(xfs_bmbt_block_t, bb_level),
  752. offsetof(xfs_bmbt_block_t, bb_numrecs),
  753. offsetof(xfs_bmbt_block_t, bb_leftsib),
  754. offsetof(xfs_bmbt_block_t, bb_rightsib),
  755. sizeof(xfs_bmbt_block_t)
  756. };
  757. XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
  758. XFS_BMBT_TRACE_ARGBI(cur, bp, fields);
  759. tp = cur->bc_tp;
  760. if (bp) {
  761. xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first,
  762. &last);
  763. xfs_trans_log_buf(tp, bp, first, last);
  764. } else
  765. xfs_trans_log_inode(tp, cur->bc_private.b.ip,
  766. XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
  767. XFS_BMBT_TRACE_CURSOR(cur, EXIT);
  768. }
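/*
 * Illustrative sketch (not part of the original file): the offsets[] table
 * above maps each XFS_BB_* field bit to its byte offset in the block header,
 * with the total header size as a trailing sentinel.  In the spirit of
 * xfs_btree_offsets(), a non-zero fields bitmask then collapses to one
 * contiguous byte range:
 */
static void
field_range_sketch(
	int		fields,		/* bitmask of dirty header fields */
	const short	*offsets,	/* per-field byte offsets + sentinel */
	int		nbits,		/* number of field bits, e.g. XFS_BB_NUM_BITS */
	int		*first,		/* out: first dirty byte */
	int		*last)		/* out: last dirty byte, inclusive */
{
	int	i;
	int	lo = -1;
	int	hi = -1;

	for (i = 0; i < nbits; i++) {
		if (fields & (1 << i)) {
			if (lo < 0)
				lo = i;
			hi = i;
		}
	}
	*first = offsets[lo];		/* start of lowest dirty field */
	*last = offsets[hi + 1] - 1;	/* end of highest dirty field */
}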
  769. /*
  770. * Log record values from the btree block.
  771. */
  772. void
  773. xfs_bmbt_log_recs(
  774. xfs_btree_cur_t *cur,
  775. xfs_buf_t *bp,
  776. int rfirst,
  777. int rlast)
  778. {
  779. xfs_bmbt_block_t *block;
  780. int first;
  781. int last;
  782. xfs_bmbt_rec_t *rp;
  783. xfs_trans_t *tp;
  784. XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
  785. XFS_BMBT_TRACE_ARGBII(cur, bp, rfirst, rlast);
  786. ASSERT(bp);
  787. tp = cur->bc_tp;
  788. block = XFS_BUF_TO_BMBT_BLOCK(bp);
  789. rp = XFS_BMAP_REC_DADDR(block, 1, cur);
  790. first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block);
  791. last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block);
  792. xfs_trans_log_buf(tp, bp, first, last);
  793. XFS_BMBT_TRACE_CURSOR(cur, EXIT);
  794. }
  795. /*
  796. * Set all the fields in a bmap extent record from the arguments.
  797. */
  798. void
  799. xfs_bmbt_set_allf(
  800. xfs_bmbt_rec_host_t *r,
  801. xfs_fileoff_t startoff,
  802. xfs_fsblock_t startblock,
  803. xfs_filblks_t blockcount,
  804. xfs_exntst_t state)
  805. {
  806. int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
  807. ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
  808. ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
  809. ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
  810. #if XFS_BIG_BLKNOS
  811. ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
  812. r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
  813. ((xfs_bmbt_rec_base_t)startoff << 9) |
  814. ((xfs_bmbt_rec_base_t)startblock >> 43);
  815. r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
  816. ((xfs_bmbt_rec_base_t)blockcount &
  817. (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
  818. #else /* !XFS_BIG_BLKNOS */
  819. if (ISNULLSTARTBLOCK(startblock)) {
  820. r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
  821. ((xfs_bmbt_rec_base_t)startoff << 9) |
  822. (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
  823. r->l1 = XFS_MASK64HI(11) |
  824. ((xfs_bmbt_rec_base_t)startblock << 21) |
  825. ((xfs_bmbt_rec_base_t)blockcount &
  826. (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
  827. } else {
  828. r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
  829. ((xfs_bmbt_rec_base_t)startoff << 9);
  830. r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
  831. ((xfs_bmbt_rec_base_t)blockcount &
  832. (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
  833. }
  834. #endif /* XFS_BIG_BLKNOS */
  835. }
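/*
 * Illustrative sketch (not part of the original file): the inverse of the
 * unpack sketch shown after __xfs_bmbt_get_all(), mirroring only the
 * XFS_BIG_BLKNOS branch above with standalone C99 types.  The caller keeps
 * startoff within 54 bits, startblock within 52 and blockcount within 21,
 * as the ASSERTs above require.
 */
#include <stdint.h>

static void
bmbt_pack_sketch(
	uint64_t	startoff,
	uint64_t	startblock,
	uint64_t	blockcount,
	int		unwritten,
	uint64_t	*l0,
	uint64_t	*l1)
{
	*l0 = ((uint64_t)(unwritten ? 1 : 0) << 63) |	/* extent flag */
	      (startoff << 9) |				/* 54-bit file offset */
	      (startblock >> 43);			/* high 9 bits of block */
	*l1 = (startblock << 21) |			/* low 43 bits of block */
	      (blockcount & ((1ULL << 21) - 1));	/* 21-bit length */
}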
  836. /*
  837. * Set all the fields in a bmap extent record from the uncompressed form.
  838. */
  839. void
  840. xfs_bmbt_set_all(
  841. xfs_bmbt_rec_host_t *r,
  842. xfs_bmbt_irec_t *s)
  843. {
  844. xfs_bmbt_set_allf(r, s->br_startoff, s->br_startblock,
  845. s->br_blockcount, s->br_state);
  846. }
  847. /*
  848. * Set all the fields in a disk format bmap extent record from the arguments.
  849. */
  850. void
  851. xfs_bmbt_disk_set_allf(
  852. xfs_bmbt_rec_t *r,
  853. xfs_fileoff_t startoff,
  854. xfs_fsblock_t startblock,
  855. xfs_filblks_t blockcount,
  856. xfs_exntst_t state)
  857. {
  858. int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
  859. ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
  860. ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
  861. ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
  862. #if XFS_BIG_BLKNOS
  863. ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
  864. r->l0 = cpu_to_be64(
  865. ((xfs_bmbt_rec_base_t)extent_flag << 63) |
  866. ((xfs_bmbt_rec_base_t)startoff << 9) |
  867. ((xfs_bmbt_rec_base_t)startblock >> 43));
  868. r->l1 = cpu_to_be64(
  869. ((xfs_bmbt_rec_base_t)startblock << 21) |
  870. ((xfs_bmbt_rec_base_t)blockcount &
  871. (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
  872. #else /* !XFS_BIG_BLKNOS */
  873. if (ISNULLSTARTBLOCK(startblock)) {
  874. r->l0 = cpu_to_be64(
  875. ((xfs_bmbt_rec_base_t)extent_flag << 63) |
  876. ((xfs_bmbt_rec_base_t)startoff << 9) |
  877. (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
  878. r->l1 = cpu_to_be64(XFS_MASK64HI(11) |
  879. ((xfs_bmbt_rec_base_t)startblock << 21) |
  880. ((xfs_bmbt_rec_base_t)blockcount &
  881. (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
  882. } else {
  883. r->l0 = cpu_to_be64(
  884. ((xfs_bmbt_rec_base_t)extent_flag << 63) |
  885. ((xfs_bmbt_rec_base_t)startoff << 9));
  886. r->l1 = cpu_to_be64(
  887. ((xfs_bmbt_rec_base_t)startblock << 21) |
  888. ((xfs_bmbt_rec_base_t)blockcount &
  889. (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
  890. }
  891. #endif /* XFS_BIG_BLKNOS */
  892. }
  893. /*
894. * Set all the fields in a disk format bmap extent record from the uncompressed form.
  895. */
  896. void
  897. xfs_bmbt_disk_set_all(
  898. xfs_bmbt_rec_t *r,
  899. xfs_bmbt_irec_t *s)
  900. {
  901. xfs_bmbt_disk_set_allf(r, s->br_startoff, s->br_startblock,
  902. s->br_blockcount, s->br_state);
  903. }
  904. /*
  905. * Set the blockcount field in a bmap extent record.
  906. */
  907. void
  908. xfs_bmbt_set_blockcount(
  909. xfs_bmbt_rec_host_t *r,
  910. xfs_filblks_t v)
  911. {
  912. ASSERT((v & XFS_MASK64HI(43)) == 0);
  913. r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(43)) |
  914. (xfs_bmbt_rec_base_t)(v & XFS_MASK64LO(21));
  915. }
  916. /*
  917. * Set the startblock field in a bmap extent record.
  918. */
  919. void
  920. xfs_bmbt_set_startblock(
  921. xfs_bmbt_rec_host_t *r,
  922. xfs_fsblock_t v)
  923. {
  924. #if XFS_BIG_BLKNOS
  925. ASSERT((v & XFS_MASK64HI(12)) == 0);
  926. r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(55)) |
  927. (xfs_bmbt_rec_base_t)(v >> 43);
  928. r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)) |
  929. (xfs_bmbt_rec_base_t)(v << 21);
  930. #else /* !XFS_BIG_BLKNOS */
  931. if (ISNULLSTARTBLOCK(v)) {
  932. r->l0 |= (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
  933. r->l1 = (xfs_bmbt_rec_base_t)XFS_MASK64HI(11) |
  934. ((xfs_bmbt_rec_base_t)v << 21) |
  935. (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
  936. } else {
  937. r->l0 &= ~(xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
  938. r->l1 = ((xfs_bmbt_rec_base_t)v << 21) |
  939. (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
  940. }
  941. #endif /* XFS_BIG_BLKNOS */
  942. }
  943. /*
  944. * Set the startoff field in a bmap extent record.
  945. */
  946. void
  947. xfs_bmbt_set_startoff(
  948. xfs_bmbt_rec_host_t *r,
  949. xfs_fileoff_t v)
  950. {
  951. ASSERT((v & XFS_MASK64HI(9)) == 0);
  952. r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) XFS_MASK64HI(1)) |
  953. ((xfs_bmbt_rec_base_t)v << 9) |
  954. (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
  955. }
  956. /*
  957. * Set the extent state field in a bmap extent record.
  958. */
  959. void
  960. xfs_bmbt_set_state(
  961. xfs_bmbt_rec_host_t *r,
  962. xfs_exntst_t v)
  963. {
  964. ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
  965. if (v == XFS_EXT_NORM)
  966. r->l0 &= XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN);
  967. else
  968. r->l0 |= XFS_MASK64HI(BMBT_EXNTFLAG_BITLEN);
  969. }
  970. /*
  971. * Convert in-memory form of btree root to on-disk form.
  972. */
  973. void
  974. xfs_bmbt_to_bmdr(
  975. xfs_bmbt_block_t *rblock,
  976. int rblocklen,
  977. xfs_bmdr_block_t *dblock,
  978. int dblocklen)
  979. {
  980. int dmxr;
  981. xfs_bmbt_key_t *fkp;
  982. __be64 *fpp;
  983. xfs_bmbt_key_t *tkp;
  984. __be64 *tpp;
  985. ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC);
  986. ASSERT(be64_to_cpu(rblock->bb_leftsib) == NULLDFSBNO);
  987. ASSERT(be64_to_cpu(rblock->bb_rightsib) == NULLDFSBNO);
  988. ASSERT(be16_to_cpu(rblock->bb_level) > 0);
  989. dblock->bb_level = rblock->bb_level;
  990. dblock->bb_numrecs = rblock->bb_numrecs;
  991. dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
  992. fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
  993. tkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
  994. fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
  995. tpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
  996. dmxr = be16_to_cpu(dblock->bb_numrecs);
  997. memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
  998. memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
  999. }
  1000. /*
  1001. * Check extent records, which have just been read, for
  1002. * any bit in the extent flag field. ASSERT on debug
  1003. * kernels, as this condition should not occur.
  1004. * Return an error condition (1) if any flags found,
  1005. * otherwise return 0.
  1006. */
  1007. int
  1008. xfs_check_nostate_extents(
  1009. xfs_ifork_t *ifp,
  1010. xfs_extnum_t idx,
  1011. xfs_extnum_t num)
  1012. {
  1013. for (; num > 0; num--, idx++) {
  1014. xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
  1015. if ((ep->l0 >>
  1016. (64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
  1017. ASSERT(0);
  1018. return 1;
  1019. }
  1020. }
  1021. return 0;
  1022. }
  1023. STATIC struct xfs_btree_cur *
  1024. xfs_bmbt_dup_cursor(
  1025. struct xfs_btree_cur *cur)
  1026. {
  1027. struct xfs_btree_cur *new;
  1028. new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
  1029. cur->bc_private.b.ip, cur->bc_private.b.whichfork);
  1030. /*
  1031. * Copy the firstblock, flist, and flags values,
  1032. * since init cursor doesn't get them.
  1033. */
  1034. new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
  1035. new->bc_private.b.flist = cur->bc_private.b.flist;
  1036. new->bc_private.b.flags = cur->bc_private.b.flags;
  1037. return new;
  1038. }
  1039. STATIC void
  1040. xfs_bmbt_update_cursor(
  1041. struct xfs_btree_cur *src,
  1042. struct xfs_btree_cur *dst)
  1043. {
  1044. ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
  1045. (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
  1046. ASSERT(dst->bc_private.b.flist == src->bc_private.b.flist);
  1047. dst->bc_private.b.allocated += src->bc_private.b.allocated;
  1048. dst->bc_private.b.firstblock = src->bc_private.b.firstblock;
  1049. src->bc_private.b.allocated = 0;
  1050. }
  1051. STATIC int
  1052. xfs_bmbt_alloc_block(
  1053. struct xfs_btree_cur *cur,
  1054. union xfs_btree_ptr *start,
  1055. union xfs_btree_ptr *new,
  1056. int length,
  1057. int *stat)
  1058. {
  1059. xfs_alloc_arg_t args; /* block allocation args */
  1060. int error; /* error return value */
  1061. memset(&args, 0, sizeof(args));
  1062. args.tp = cur->bc_tp;
  1063. args.mp = cur->bc_mp;
  1064. args.fsbno = cur->bc_private.b.firstblock;
  1065. args.firstblock = args.fsbno;
  1066. if (args.fsbno == NULLFSBLOCK) {
  1067. args.fsbno = be64_to_cpu(start->l);
  1068. args.type = XFS_ALLOCTYPE_START_BNO;
  1069. /*
  1070. * Make sure there is sufficient room left in the AG to
  1071. * complete a full tree split for an extent insert. If
  1072. * we are converting the middle part of an extent then
  1073. * we may need space for two tree splits.
  1074. *
  1075. * We are relying on the caller to make the correct block
  1076. * reservation for this operation to succeed. If the
  1077. * reservation amount is insufficient then we may fail a
  1078. * block allocation here and corrupt the filesystem.
  1079. */
  1080. args.minleft = xfs_trans_get_block_res(args.tp);
  1081. } else if (cur->bc_private.b.flist->xbf_low) {
  1082. args.type = XFS_ALLOCTYPE_START_BNO;
  1083. } else {
  1084. args.type = XFS_ALLOCTYPE_NEAR_BNO;
  1085. }
  1086. args.minlen = args.maxlen = args.prod = 1;
  1087. args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
  1088. if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) {
  1089. error = XFS_ERROR(ENOSPC);
  1090. goto error0;
  1091. }
  1092. error = xfs_alloc_vextent(&args);
  1093. if (error)
  1094. goto error0;
  1095. if (args.fsbno == NULLFSBLOCK && args.minleft) {
  1096. /*
  1097. * Could not find an AG with enough free space to satisfy
  1098. * a full btree split. Try again without minleft and if
  1099. * successful activate the lowspace algorithm.
  1100. */
  1101. args.fsbno = 0;
  1102. args.type = XFS_ALLOCTYPE_FIRST_AG;
  1103. args.minleft = 0;
  1104. error = xfs_alloc_vextent(&args);
  1105. if (error)
  1106. goto error0;
  1107. cur->bc_private.b.flist->xbf_low = 1;
  1108. }
  1109. if (args.fsbno == NULLFSBLOCK) {
  1110. XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
  1111. *stat = 0;
  1112. return 0;
  1113. }
  1114. ASSERT(args.len == 1);
  1115. cur->bc_private.b.firstblock = args.fsbno;
  1116. cur->bc_private.b.allocated++;
  1117. cur->bc_private.b.ip->i_d.di_nblocks++;
  1118. xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
  1119. XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
  1120. XFS_TRANS_DQ_BCOUNT, 1L);
  1121. new->l = cpu_to_be64(args.fsbno);
  1122. XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
  1123. *stat = 1;
  1124. return 0;
  1125. error0:
  1126. XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
  1127. return error;
  1128. }
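/*
 * Illustrative sketch (not part of the original file): the allocation
 * fallback above reduced to its control flow.  alloc_fn_sketch is a
 * hypothetical stand-in for xfs_alloc_vextent() and NO_BLOCK_SKETCH for
 * NULLFSBLOCK.  The point is the two-pass policy: first ask for a block
 * while leaving minleft blocks free for a worst-case tree split, and only
 * if that fails retry in any AG with minleft dropped, switching to the
 * low-space algorithm.
 */
#include <stdint.h>

#define NO_BLOCK_SKETCH		((uint64_t)-1)

struct alloc_args_sketch {
	uint64_t	target;		/* preferred starting block */
	int		minleft;	/* blocks to leave free in the AG */
	int		any_ag;		/* 1: first AG with space will do */
};

typedef uint64_t (*alloc_fn_sketch)(struct alloc_args_sketch *args);

static uint64_t
bmbt_alloc_flow_sketch(
	alloc_fn_sketch	alloc,
	uint64_t	hint,
	int		block_res,	/* transaction block reservation */
	int		*low_space)	/* out: low-space mode flag */
{
	struct alloc_args_sketch	args = {
		.target	 = hint,
		.minleft = block_res,	/* headroom for a full split */
		.any_ag	 = 0,
	};
	uint64_t			bno;

	bno = alloc(&args);
	if (bno == NO_BLOCK_SKETCH && args.minleft) {
		/* No AG had enough headroom: retry anywhere without minleft. */
		args.target = 0;
		args.minleft = 0;
		args.any_ag = 1;
		bno = alloc(&args);
		*low_space = 1;		/* as above: flag set after the retry */
	}
	return bno;
}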
  1129. STATIC int
  1130. xfs_bmbt_free_block(
  1131. struct xfs_btree_cur *cur,
  1132. struct xfs_buf *bp)
  1133. {
  1134. struct xfs_mount *mp = cur->bc_mp;
  1135. struct xfs_inode *ip = cur->bc_private.b.ip;
  1136. struct xfs_trans *tp = cur->bc_tp;
  1137. xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
  1138. xfs_bmap_add_free(fsbno, 1, cur->bc_private.b.flist, mp);
  1139. ip->i_d.di_nblocks--;
  1140. xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
  1141. XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
  1142. xfs_trans_binval(tp, bp);
  1143. return 0;
  1144. }
  1145. STATIC int
  1146. xfs_bmbt_get_maxrecs(
  1147. struct xfs_btree_cur *cur,
  1148. int level)
  1149. {
  1150. return XFS_BMAP_BLOCK_IMAXRECS(level, cur);
  1151. }
  1152. /*
  1153. * Get the maximum records we could store in the on-disk format.
  1154. *
  1155. * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
  1156. * for the root node this checks the available space in the dinode fork
  1157. * so that we can resize the in-memory buffer to match it. After a
  1158. * resize to the maximum size this function returns the same value
  1159. * as xfs_bmbt_get_maxrecs for the root node, too.
  1160. */
  1161. STATIC int
  1162. xfs_bmbt_get_dmaxrecs(
  1163. struct xfs_btree_cur *cur,
  1164. int level)
  1165. {
  1166. return XFS_BMAP_BLOCK_DMAXRECS(level, cur);
  1167. }
  1168. STATIC void
  1169. xfs_bmbt_init_key_from_rec(
  1170. union xfs_btree_key *key,
  1171. union xfs_btree_rec *rec)
  1172. {
  1173. key->bmbt.br_startoff =
  1174. cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
  1175. }
  1176. STATIC void
  1177. xfs_bmbt_init_rec_from_key(
  1178. union xfs_btree_key *key,
  1179. union xfs_btree_rec *rec)
  1180. {
  1181. ASSERT(key->bmbt.br_startoff != 0);
  1182. xfs_bmbt_disk_set_allf(&rec->bmbt, be64_to_cpu(key->bmbt.br_startoff),
  1183. 0, 0, XFS_EXT_NORM);
  1184. }
  1185. STATIC void
  1186. xfs_bmbt_init_rec_from_cur(
  1187. struct xfs_btree_cur *cur,
  1188. union xfs_btree_rec *rec)
  1189. {
  1190. xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
  1191. }
  1192. STATIC void
  1193. xfs_bmbt_init_ptr_from_cur(
  1194. struct xfs_btree_cur *cur,
  1195. union xfs_btree_ptr *ptr)
  1196. {
  1197. ptr->l = 0;
  1198. }
  1199. STATIC __int64_t
  1200. xfs_bmbt_key_diff(
  1201. struct xfs_btree_cur *cur,
  1202. union xfs_btree_key *key)
  1203. {
  1204. return (__int64_t)be64_to_cpu(key->bmbt.br_startoff) -
  1205. cur->bc_rec.b.br_startoff;
  1206. }
  1207. #ifdef XFS_BTREE_TRACE
  1208. ktrace_t *xfs_bmbt_trace_buf;
  1209. STATIC void
  1210. xfs_bmbt_trace_enter(
  1211. struct xfs_btree_cur *cur,
  1212. const char *func,
  1213. char *s,
  1214. int type,
  1215. int line,
  1216. __psunsigned_t a0,
  1217. __psunsigned_t a1,
  1218. __psunsigned_t a2,
  1219. __psunsigned_t a3,
  1220. __psunsigned_t a4,
  1221. __psunsigned_t a5,
  1222. __psunsigned_t a6,
  1223. __psunsigned_t a7,
  1224. __psunsigned_t a8,
  1225. __psunsigned_t a9,
  1226. __psunsigned_t a10)
  1227. {
  1228. struct xfs_inode *ip = cur->bc_private.b.ip;
  1229. int whichfork = cur->bc_private.b.whichfork;
  1230. ktrace_enter(xfs_bmbt_trace_buf,
  1231. (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
  1232. (void *)func, (void *)s, (void *)ip, (void *)cur,
  1233. (void *)a0, (void *)a1, (void *)a2, (void *)a3,
  1234. (void *)a4, (void *)a5, (void *)a6, (void *)a7,
  1235. (void *)a8, (void *)a9, (void *)a10);
  1236. ktrace_enter(ip->i_btrace,
  1237. (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
  1238. (void *)func, (void *)s, (void *)ip, (void *)cur,
  1239. (void *)a0, (void *)a1, (void *)a2, (void *)a3,
  1240. (void *)a4, (void *)a5, (void *)a6, (void *)a7,
  1241. (void *)a8, (void *)a9, (void *)a10);
  1242. }
  1243. STATIC void
  1244. xfs_bmbt_trace_cursor(
  1245. struct xfs_btree_cur *cur,
  1246. __uint32_t *s0,
  1247. __uint64_t *l0,
  1248. __uint64_t *l1)
  1249. {
  1250. struct xfs_bmbt_rec_host r;
  1251. xfs_bmbt_set_all(&r, &cur->bc_rec.b);
  1252. *s0 = (cur->bc_nlevels << 24) |
  1253. (cur->bc_private.b.flags << 16) |
  1254. cur->bc_private.b.allocated;
  1255. *l0 = r.l0;
  1256. *l1 = r.l1;
  1257. }
  1258. STATIC void
  1259. xfs_bmbt_trace_key(
  1260. struct xfs_btree_cur *cur,
  1261. union xfs_btree_key *key,
  1262. __uint64_t *l0,
  1263. __uint64_t *l1)
  1264. {
  1265. *l0 = be64_to_cpu(key->bmbt.br_startoff);
  1266. *l1 = 0;
  1267. }
  1268. STATIC void
  1269. xfs_bmbt_trace_record(
  1270. struct xfs_btree_cur *cur,
  1271. union xfs_btree_rec *rec,
  1272. __uint64_t *l0,
  1273. __uint64_t *l1,
  1274. __uint64_t *l2)
  1275. {
  1276. struct xfs_bmbt_irec irec;
  1277. xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
  1278. *l0 = irec.br_startoff;
  1279. *l1 = irec.br_startblock;
  1280. *l2 = irec.br_blockcount;
  1281. }
  1282. #endif /* XFS_BTREE_TRACE */
  1283. static const struct xfs_btree_ops xfs_bmbt_ops = {
  1284. .rec_len = sizeof(xfs_bmbt_rec_t),
  1285. .key_len = sizeof(xfs_bmbt_key_t),
  1286. .dup_cursor = xfs_bmbt_dup_cursor,
  1287. .update_cursor = xfs_bmbt_update_cursor,
  1288. .alloc_block = xfs_bmbt_alloc_block,
  1289. .free_block = xfs_bmbt_free_block,
  1290. .get_maxrecs = xfs_bmbt_get_maxrecs,
  1291. .get_dmaxrecs = xfs_bmbt_get_dmaxrecs,
  1292. .init_key_from_rec = xfs_bmbt_init_key_from_rec,
  1293. .init_rec_from_key = xfs_bmbt_init_rec_from_key,
  1294. .init_rec_from_cur = xfs_bmbt_init_rec_from_cur,
  1295. .init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur,
  1296. .key_diff = xfs_bmbt_key_diff,
  1297. #ifdef XFS_BTREE_TRACE
  1298. .trace_enter = xfs_bmbt_trace_enter,
  1299. .trace_cursor = xfs_bmbt_trace_cursor,
  1300. .trace_key = xfs_bmbt_trace_key,
  1301. .trace_record = xfs_bmbt_trace_record,
  1302. #endif
  1303. };
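/*
 * Illustrative sketch (not part of the original file): generic code in
 * xfs_btree.c does not call the xfs_bmbt_* helpers above by name; it
 * dispatches through the ops table attached to the cursor, e.g.:
 */
static int
bmbt_maxrecs_via_ops_sketch(
	struct xfs_btree_cur	*cur,
	int			level)
{
	/* Equivalent to xfs_bmbt_get_maxrecs() for a bmap btree cursor. */
	return cur->bc_ops->get_maxrecs(cur, level);
}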
  1304. /*
  1305. * Allocate a new bmap btree cursor.
  1306. */
  1307. struct xfs_btree_cur * /* new bmap btree cursor */
  1308. xfs_bmbt_init_cursor(
  1309. struct xfs_mount *mp, /* file system mount point */
  1310. struct xfs_trans *tp, /* transaction pointer */
  1311. struct xfs_inode *ip, /* inode owning the btree */
  1312. int whichfork) /* data or attr fork */
  1313. {
  1314. struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
  1315. struct xfs_btree_cur *cur;
  1316. cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
  1317. cur->bc_tp = tp;
  1318. cur->bc_mp = mp;
  1319. cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
  1320. cur->bc_btnum = XFS_BTNUM_BMAP;
  1321. cur->bc_blocklog = mp->m_sb.sb_blocklog;
  1322. cur->bc_ops = &xfs_bmbt_ops;
  1323. cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
  1324. cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
  1325. cur->bc_private.b.ip = ip;
  1326. cur->bc_private.b.firstblock = NULLFSBLOCK;
  1327. cur->bc_private.b.flist = NULL;
  1328. cur->bc_private.b.allocated = 0;
  1329. cur->bc_private.b.flags = 0;
  1330. cur->bc_private.b.whichfork = whichfork;
  1331. return cur;
  1332. }
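/*
 * Illustrative sketch (not part of the original file): a typical cursor life
 * cycle for this tree.  It assumes the caller holds the inode lock in a
 * transaction, sets up cur->bc_private.b.firstblock and .flist as real
 * callers in xfs_bmap.c do, and has already positioned the cursor at the
 * record to remove (the lookup step is omitted here).
 */
STATIC int
bmbt_delete_one_sketch(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_inode	*ip)
{
	struct xfs_btree_cur	*cur;
	int			error;
	int			stat;	/* 1 if a record was removed */

	cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
	/* ... set firstblock/flist and position the cursor here ... */
	error = xfs_bmbt_delete(cur, &stat);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	return error;
}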