xfs_alloc_btree.c

/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
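
/*
 * Duplicate the cursor: build a second cursor for the same btree
 * (same AG, same AGF buffer, same btree type) in the same transaction.
 */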
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}
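
/*
 * Point the AGF at a new root block and adjust the tree level counters,
 * both in the on-disk AGF and in the in-core perag.
 */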
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	pag->pagf_levels[btnum] += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
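
/*
 * Allocate a new btree block from the AG free list.
 */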
STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			length,
	int			*stat)
{
	int			error;
	xfs_agblock_t		bno;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	if (bno == NULLAGBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1,
			      false);

	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
}
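
/*
 * Return a btree block to the AG free list, marking the extent busy so
 * it is not reused before this transaction is committed.
 */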
STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
			       XFS_EXTENT_BUSY_SKIP_DISCARD);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);

	xfs_trans_binval(cur->bc_tp, bp);
	return 0;
}

/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_rec	*rec,
	int			ptr,
	int			reason)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag;
	__be32			len;
	int			numrecs;

	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	pag = xfs_perag_get(cur->bc_mp, seqno);
	pag->pagf_longest = be32_to_cpu(len);
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}
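
/* Minimum records per block: leaf blocks use m_alloc_mnr[0], node blocks m_alloc_mnr[1]. */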
STATIC int
xfs_allocbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mnr[level != 0];
}
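
/* Maximum records per block: leaf blocks use m_alloc_mxr[0], node blocks m_alloc_mxr[1]. */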
STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}
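
/* Build a search key from an on-disk record. */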
STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	ASSERT(rec->alloc.ar_startblock != 0);

	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}
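
/* Build an on-disk record from a search key. */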
STATIC void
xfs_allocbt_init_rec_from_key(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	ASSERT(key->alloc.ar_startblock != 0);

	rec->alloc.ar_startblock = key->alloc.ar_startblock;
	rec->alloc.ar_blockcount = key->alloc.ar_blockcount;
}
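
/* Build an on-disk record from the in-core record held in the cursor. */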
STATIC void
xfs_allocbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	ASSERT(cur->bc_rec.a.ar_startblock != 0);

	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}
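
/* Point ptr at the root block of this btree, as recorded in the AGF. */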
STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);

	ptr->s = agf->agf_roots[cur->bc_btnum];
}
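
/*
 * Compare a key against the cursor's in-core record.  The by-block (BNO)
 * tree orders on startblock alone; the by-size (CNT) tree orders on
 * blockcount first and startblock second.
 */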
STATIC __int64_t
xfs_allocbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;
	__int64_t		diff;

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return (__int64_t)be32_to_cpu(kp->ar_startblock) -
				rec->ar_startblock;
	}

	diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}
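
/*
 * Sanity check an allocation btree block: magic number, tree level,
 * record count and sibling pointers.
 */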
static bool
xfs_allocbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	unsigned int		level;

	/*
	 * magic number and level verification
	 *
	 * During growfs operations, we can't verify the exact level or owner
	 * as the perag is not fully initialised and hence not attached to the
	 * buffer. In this case, check against the maximum tree depth.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agf information will not yet have been initialised
	 * from the on disk AGF. Again, we can only check against maximum
	 * limits in this case.
	 */
	level = be16_to_cpu(block->bb_level);
	switch (block->bb_magic) {
	case cpu_to_be32(XFS_ABTB_CRC_MAGIC):
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			return false;
		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid))
			return false;
		if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
			return false;
		if (pag &&
		    be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
			return false;
		/* fall through */
	case cpu_to_be32(XFS_ABTB_MAGIC):
		if (pag && pag->pagf_init) {
			if (level >= pag->pagf_levels[XFS_BTNUM_BNOi])
				return false;
		} else if (level >= mp->m_ag_maxlevels)
			return false;
		break;
	case cpu_to_be32(XFS_ABTC_CRC_MAGIC):
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			return false;
		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid))
			return false;
		if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
			return false;
		if (pag &&
		    be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
			return false;
		/* fall through */
	case cpu_to_be32(XFS_ABTC_MAGIC):
		if (pag && pag->pagf_init) {
			if (level >= pag->pagf_levels[XFS_BTNUM_CNTi])
				return false;
		} else if (level >= mp->m_ag_maxlevels)
			return false;
		break;
	default:
		return false;
	}

	/* numrecs verification */
	if (be16_to_cpu(block->bb_numrecs) > mp->m_alloc_mxr[level != 0])
		return false;

	/* sibling pointer verification */
	if (!block->bb_u.s.bb_leftsib ||
	    (be32_to_cpu(block->bb_u.s.bb_leftsib) >= mp->m_sb.sb_agblocks &&
	     block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK)))
		return false;
	if (!block->bb_u.s.bb_rightsib ||
	    (be32_to_cpu(block->bb_u.s.bb_rightsib) >= mp->m_sb.sb_agblocks &&
	     block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK)))
		return false;

	return true;
}
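
/* Verify a btree block as it is read in from disk. */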
static void
xfs_allocbt_read_verify(
	struct xfs_buf	*bp)
{
	if (!(xfs_btree_sblock_verify_crc(bp) &&
	      xfs_allocbt_verify(bp))) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
				     bp->b_target->bt_mount, bp->b_addr);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
	}
}
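
/* Verify a btree block and update its checksum before it is written to disk. */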
static void
xfs_allocbt_write_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_allocbt_verify(bp)) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
				     bp->b_target->bt_mount, bp->b_addr);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_allocbt_buf_ops = {
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
};

#ifdef DEBUG
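/* Debug-only: check that two keys are in the correct order for this btree. */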
STATIC int
xfs_allocbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return be32_to_cpu(k1->alloc.ar_startblock) <
		       be32_to_cpu(k2->alloc.ar_startblock);
	} else {
		return be32_to_cpu(k1->alloc.ar_blockcount) <
			be32_to_cpu(k2->alloc.ar_blockcount) ||
			(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
			 be32_to_cpu(k1->alloc.ar_startblock) <
			 be32_to_cpu(k2->alloc.ar_startblock));
	}
}
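
/* Debug-only: check that two records are in the correct order for this btree. */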
STATIC int
xfs_allocbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return be32_to_cpu(r1->alloc.ar_startblock) +
			be32_to_cpu(r1->alloc.ar_blockcount) <=
			be32_to_cpu(r2->alloc.ar_startblock);
	} else {
		return be32_to_cpu(r1->alloc.ar_blockcount) <
			be32_to_cpu(r2->alloc.ar_blockcount) ||
			(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
			 be32_to_cpu(r1->alloc.ar_startblock) <
			 be32_to_cpu(r2->alloc.ar_startblock));
	}
}
#endif	/* DEBUG */
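
/*
 * Operations vector shared by the by-block and by-size allocation btrees.
 */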
static const struct xfs_btree_ops xfs_allocbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_rec_from_key	= xfs_allocbt_init_rec_from_key,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_allocbt_key_diff,
	.buf_ops		= &xfs_allocbt_buf_ops,
#ifdef DEBUG
	.keys_inorder		= xfs_allocbt_keys_inorder,
	.recs_inorder		= xfs_allocbt_recs_inorder,
#endif
};

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agf structure */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* btree identifier */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_allocbt_ops;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	}

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	return cur;
}

/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}