xfs_alloc_btree.c 65 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203
  1. /*
  2. * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include "xfs.h"
  19. #include "xfs_fs.h"
  20. #include "xfs_types.h"
  21. #include "xfs_bit.h"
  22. #include "xfs_log.h"
  23. #include "xfs_inum.h"
  24. #include "xfs_trans.h"
  25. #include "xfs_sb.h"
  26. #include "xfs_ag.h"
  27. #include "xfs_dir.h"
  28. #include "xfs_dir2.h"
  29. #include "xfs_dmapi.h"
  30. #include "xfs_mount.h"
  31. #include "xfs_bmap_btree.h"
  32. #include "xfs_alloc_btree.h"
  33. #include "xfs_ialloc_btree.h"
  34. #include "xfs_dir_sf.h"
  35. #include "xfs_dir2_sf.h"
  36. #include "xfs_attr_sf.h"
  37. #include "xfs_dinode.h"
  38. #include "xfs_inode.h"
  39. #include "xfs_btree.h"
  40. #include "xfs_ialloc.h"
  41. #include "xfs_alloc.h"
  42. #include "xfs_error.h"
  43. /*
  44. * Prototypes for internal functions.
  45. */
  46. STATIC void xfs_alloc_log_block(xfs_trans_t *, xfs_buf_t *, int);
  47. STATIC void xfs_alloc_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
  48. STATIC void xfs_alloc_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
  49. STATIC void xfs_alloc_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
  50. STATIC int xfs_alloc_lshift(xfs_btree_cur_t *, int, int *);
  51. STATIC int xfs_alloc_newroot(xfs_btree_cur_t *, int *);
  52. STATIC int xfs_alloc_rshift(xfs_btree_cur_t *, int, int *);
  53. STATIC int xfs_alloc_split(xfs_btree_cur_t *, int, xfs_agblock_t *,
  54. xfs_alloc_key_t *, xfs_btree_cur_t **, int *);
  55. STATIC int xfs_alloc_updkey(xfs_btree_cur_t *, xfs_alloc_key_t *, int);
  56. /*
  57. * Internal functions.
  58. */
  59. /*
  60. * Single level of the xfs_alloc_delete record deletion routine.
  61. * Delete record pointed to by cur/level.
  62. * Remove the record from its block then rebalance the tree.
  63. * Return 0 for error, 1 for done, 2 to go on to the next level.
  64. */
  65. STATIC int /* error */
  66. xfs_alloc_delrec(
  67. xfs_btree_cur_t *cur, /* btree cursor */
  68. int level, /* level removing record from */
  69. int *stat) /* fail/done/go-on */
  70. {
  71. xfs_agf_t *agf; /* allocation group freelist header */
  72. xfs_alloc_block_t *block; /* btree block record/key lives in */
  73. xfs_agblock_t bno; /* btree block number */
  74. xfs_buf_t *bp; /* buffer for block */
  75. int error; /* error return value */
  76. int i; /* loop index */
  77. xfs_alloc_key_t key; /* kp points here if block is level 0 */
  78. xfs_agblock_t lbno; /* left block's block number */
  79. xfs_buf_t *lbp; /* left block's buffer pointer */
  80. xfs_alloc_block_t *left; /* left btree block */
  81. xfs_alloc_key_t *lkp=NULL; /* left block key pointer */
  82. xfs_alloc_ptr_t *lpp=NULL; /* left block address pointer */
  83. int lrecs=0; /* number of records in left block */
  84. xfs_alloc_rec_t *lrp; /* left block record pointer */
  85. xfs_mount_t *mp; /* mount structure */
  86. int ptr; /* index in btree block for this rec */
  87. xfs_agblock_t rbno; /* right block's block number */
  88. xfs_buf_t *rbp; /* right block's buffer pointer */
  89. xfs_alloc_block_t *right; /* right btree block */
  90. xfs_alloc_key_t *rkp; /* right block key pointer */
  91. xfs_alloc_ptr_t *rpp; /* right block address pointer */
  92. int rrecs=0; /* number of records in right block */
  93. xfs_alloc_rec_t *rrp; /* right block record pointer */
  94. xfs_btree_cur_t *tcur; /* temporary btree cursor */
  95. /*
  96. * Get the index of the entry being deleted, check for nothing there.
  97. */
  98. ptr = cur->bc_ptrs[level];
  99. if (ptr == 0) {
  100. *stat = 0;
  101. return 0;
  102. }
  103. /*
  104. * Get the buffer & block containing the record or key/ptr.
  105. */
  106. bp = cur->bc_bufs[level];
  107. block = XFS_BUF_TO_ALLOC_BLOCK(bp);
  108. #ifdef DEBUG
  109. if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
  110. return error;
  111. #endif
  112. /*
  113. * Fail if we're off the end of the block.
  114. */
  115. if (ptr > be16_to_cpu(block->bb_numrecs)) {
  116. *stat = 0;
  117. return 0;
  118. }
  119. XFS_STATS_INC(xs_abt_delrec);
  120. /*
  121. * It's a nonleaf. Excise the key and ptr being deleted, by
  122. * sliding the entries past them down one.
  123. * Log the changed areas of the block.
  124. */
  125. if (level > 0) {
  126. lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
  127. lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
  128. #ifdef DEBUG
  129. for (i = ptr; i < be16_to_cpu(block->bb_numrecs); i++) {
  130. if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level)))
  131. return error;
  132. }
  133. #endif
  134. if (ptr < be16_to_cpu(block->bb_numrecs)) {
  135. memmove(&lkp[ptr - 1], &lkp[ptr],
  136. (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lkp));
  137. memmove(&lpp[ptr - 1], &lpp[ptr],
  138. (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lpp));
  139. xfs_alloc_log_ptrs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1);
  140. xfs_alloc_log_keys(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1);
  141. }
  142. }
  143. /*
  144. * It's a leaf. Excise the record being deleted, by sliding the
  145. * entries past it down one. Log the changed areas of the block.
  146. */
  147. else {
  148. lrp = XFS_ALLOC_REC_ADDR(block, 1, cur);
  149. if (ptr < be16_to_cpu(block->bb_numrecs)) {
  150. memmove(&lrp[ptr - 1], &lrp[ptr],
  151. (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lrp));
  152. xfs_alloc_log_recs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1);
  153. }
  154. /*
  155. * If it's the first record in the block, we'll need a key
  156. * structure to pass up to the next level (updkey).
  157. */
  158. if (ptr == 1) {
  159. key.ar_startblock = lrp->ar_startblock;
  160. key.ar_blockcount = lrp->ar_blockcount;
  161. lkp = &key;
  162. }
  163. }
  164. /*
  165. * Decrement and log the number of entries in the block.
  166. */
  167. be16_add(&block->bb_numrecs, -1);
  168. xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
  169. /*
  170. * See if the longest free extent in the allocation group was
  171. * changed by this operation. True if it's the by-size btree, and
  172. * this is the leaf level, and there is no right sibling block,
  173. * and this was the last record.
  174. */
  175. agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
  176. mp = cur->bc_mp;
  177. if (level == 0 &&
  178. cur->bc_btnum == XFS_BTNUM_CNT &&
  179. be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
  180. ptr > be16_to_cpu(block->bb_numrecs)) {
  181. ASSERT(ptr == be16_to_cpu(block->bb_numrecs) + 1);
  182. /*
  183. * There are still records in the block. Grab the size
  184. * from the last one.
  185. */
  186. if (be16_to_cpu(block->bb_numrecs)) {
  187. rrp = XFS_ALLOC_REC_ADDR(block, be16_to_cpu(block->bb_numrecs), cur);
  188. agf->agf_longest = rrp->ar_blockcount;
  189. }
  190. /*
  191. * No free extents left.
  192. */
  193. else
  194. agf->agf_longest = 0;
  195. mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest =
  196. be32_to_cpu(agf->agf_longest);
  197. xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
  198. XFS_AGF_LONGEST);
  199. }
  200. /*
  201. * Is this the root level? If so, we're almost done.
  202. */
  203. if (level == cur->bc_nlevels - 1) {
  204. /*
  205. * If this is the root level,
  206. * and there's only one entry left,
  207. * and it's NOT the leaf level,
  208. * then we can get rid of this level.
  209. */
  210. if (be16_to_cpu(block->bb_numrecs) == 1 && level > 0) {
  211. /*
  212. * lpp is still set to the first pointer in the block.
  213. * Make it the new root of the btree.
  214. */
  215. bno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]);
  216. agf->agf_roots[cur->bc_btnum] = *lpp;
  217. be32_add(&agf->agf_levels[cur->bc_btnum], -1);
  218. mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_levels[cur->bc_btnum]--;
  219. /*
  220. * Put this buffer/block on the ag's freelist.
  221. */
  222. if ((error = xfs_alloc_put_freelist(cur->bc_tp,
  223. cur->bc_private.a.agbp, NULL, bno)))
  224. return error;
  225. /*
  226. * Since blocks move to the free list without the
  227. * coordination used in xfs_bmap_finish, we can't allow
  228. * block to be available for reallocation and
  229. * non-transaction writing (user data) until we know
  230. * that the transaction that moved it to the free list
  231. * is permanently on disk. We track the blocks by
  232. * declaring these blocks as "busy"; the busy list is
  233. * maintained on a per-ag basis and each transaction
  234. * records which entries should be removed when the
  235. * iclog commits to disk. If a busy block is
  236. * allocated, the iclog is pushed up to the LSN
  237. * that freed the block.
  238. */
  239. xfs_alloc_mark_busy(cur->bc_tp,
  240. be32_to_cpu(agf->agf_seqno), bno, 1);
  241. xfs_trans_agbtree_delta(cur->bc_tp, -1);
  242. xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
  243. XFS_AGF_ROOTS | XFS_AGF_LEVELS);
  244. /*
  245. * Update the cursor so there's one fewer level.
  246. */
  247. xfs_btree_setbuf(cur, level, NULL);
  248. cur->bc_nlevels--;
  249. } else if (level > 0 &&
  250. (error = xfs_alloc_decrement(cur, level, &i)))
  251. return error;
  252. *stat = 1;
  253. return 0;
  254. }
  255. /*
  256. * If we deleted the leftmost entry in the block, update the
  257. * key values above us in the tree.
  258. */
  259. if (ptr == 1 && (error = xfs_alloc_updkey(cur, lkp, level + 1)))
  260. return error;
  261. /*
  262. * If the number of records remaining in the block is at least
  263. * the minimum, we're done.
  264. */
  265. if (be16_to_cpu(block->bb_numrecs) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
  266. if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i)))
  267. return error;
  268. *stat = 1;
  269. return 0;
  270. }
  271. /*
  272. * Otherwise, we have to move some records around to keep the
  273. * tree balanced. Look at the left and right sibling blocks to
  274. * see if we can re-balance by moving only one record.
  275. */
  276. rbno = be32_to_cpu(block->bb_rightsib);
  277. lbno = be32_to_cpu(block->bb_leftsib);
  278. bno = NULLAGBLOCK;
  279. ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK);
  280. /*
  281. * Duplicate the cursor so our btree manipulations here won't
  282. * disrupt the next level up.
  283. */
  284. if ((error = xfs_btree_dup_cursor(cur, &tcur)))
  285. return error;
  286. /*
  287. * If there's a right sibling, see if it's ok to shift an entry
  288. * out of it.
  289. */
  290. if (rbno != NULLAGBLOCK) {
  291. /*
  292. * Move the temp cursor to the last entry in the next block.
  293. * Actually any entry but the first would suffice.
  294. */
  295. i = xfs_btree_lastrec(tcur, level);
  296. XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
  297. if ((error = xfs_alloc_increment(tcur, level, &i)))
  298. goto error0;
  299. XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
  300. i = xfs_btree_lastrec(tcur, level);
  301. XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
  302. /*
  303. * Grab a pointer to the block.
  304. */
  305. rbp = tcur->bc_bufs[level];
  306. right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
  307. #ifdef DEBUG
  308. if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
  309. goto error0;
  310. #endif
  311. /*
  312. * Grab the current block number, for future use.
  313. */
  314. bno = be32_to_cpu(right->bb_leftsib);
  315. /*
  316. * If right block is full enough so that removing one entry
  317. * won't make it too empty, and left-shifting an entry out
  318. * of right to us works, we're done.
  319. */
  320. if (be16_to_cpu(right->bb_numrecs) - 1 >=
  321. XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
  322. if ((error = xfs_alloc_lshift(tcur, level, &i)))
  323. goto error0;
  324. if (i) {
  325. ASSERT(be16_to_cpu(block->bb_numrecs) >=
  326. XFS_ALLOC_BLOCK_MINRECS(level, cur));
  327. xfs_btree_del_cursor(tcur,
  328. XFS_BTREE_NOERROR);
  329. if (level > 0 &&
  330. (error = xfs_alloc_decrement(cur, level,
  331. &i)))
  332. return error;
  333. *stat = 1;
  334. return 0;
  335. }
  336. }
  337. /*
  338. * Otherwise, grab the number of records in right for
  339. * future reference, and fix up the temp cursor to point
  340. * to our block again (last record).
  341. */
  342. rrecs = be16_to_cpu(right->bb_numrecs);
  343. if (lbno != NULLAGBLOCK) {
  344. i = xfs_btree_firstrec(tcur, level);
  345. XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
  346. if ((error = xfs_alloc_decrement(tcur, level, &i)))
  347. goto error0;
  348. XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
  349. }
  350. }
  351. /*
  352. * If there's a left sibling, see if it's ok to shift an entry
  353. * out of it.
  354. */
  355. if (lbno != NULLAGBLOCK) {
  356. /*
  357. * Move the temp cursor to the first entry in the
  358. * previous block.
  359. */
  360. i = xfs_btree_firstrec(tcur, level);
  361. XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
  362. if ((error = xfs_alloc_decrement(tcur, level, &i)))
  363. goto error0;
  364. XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
  365. xfs_btree_firstrec(tcur, level);
  366. /*
  367. * Grab a pointer to the block.
  368. */
  369. lbp = tcur->bc_bufs[level];
  370. left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
  371. #ifdef DEBUG
  372. if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
  373. goto error0;
  374. #endif
  375. /*
  376. * Grab the current block number, for future use.
  377. */
  378. bno = be32_to_cpu(left->bb_rightsib);
  379. /*
  380. * If left block is full enough so that removing one entry
  381. * won't make it too empty, and right-shifting an entry out
  382. * of left to us works, we're done.
  383. */
  384. if (be16_to_cpu(left->bb_numrecs) - 1 >=
  385. XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
  386. if ((error = xfs_alloc_rshift(tcur, level, &i)))
  387. goto error0;
  388. if (i) {
  389. ASSERT(be16_to_cpu(block->bb_numrecs) >=
  390. XFS_ALLOC_BLOCK_MINRECS(level, cur));
  391. xfs_btree_del_cursor(tcur,
  392. XFS_BTREE_NOERROR);
  393. if (level == 0)
  394. cur->bc_ptrs[0]++;
  395. *stat = 1;
  396. return 0;
  397. }
  398. }
  399. /*
  400. * Otherwise, grab the number of records in right for
  401. * future reference.
  402. */
  403. lrecs = be16_to_cpu(left->bb_numrecs);
  404. }
  405. /*
  406. * Delete the temp cursor, we're done with it.
  407. */
  408. xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
  409. /*
  410. * If here, we need to do a join to keep the tree balanced.
  411. */
  412. ASSERT(bno != NULLAGBLOCK);
  413. /*
  414. * See if we can join with the left neighbor block.
  415. */
  416. if (lbno != NULLAGBLOCK &&
  417. lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
  418. /*
  419. * Set "right" to be the starting block,
  420. * "left" to be the left neighbor.
  421. */
  422. rbno = bno;
  423. right = block;
  424. rbp = bp;
  425. if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
  426. cur->bc_private.a.agno, lbno, 0, &lbp,
  427. XFS_ALLOC_BTREE_REF)))
  428. return error;
  429. left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
  430. if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
  431. return error;
  432. }
  433. /*
  434. * If that won't work, see if we can join with the right neighbor block.
  435. */
  436. else if (rbno != NULLAGBLOCK &&
  437. rrecs + be16_to_cpu(block->bb_numrecs) <=
  438. XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
  439. /*
  440. * Set "left" to be the starting block,
  441. * "right" to be the right neighbor.
  442. */
  443. lbno = bno;
  444. left = block;
  445. lbp = bp;
  446. if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
  447. cur->bc_private.a.agno, rbno, 0, &rbp,
  448. XFS_ALLOC_BTREE_REF)))
  449. return error;
  450. right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
  451. if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
  452. return error;
  453. }
  454. /*
  455. * Otherwise, we can't fix the imbalance.
  456. * Just return. This is probably a logic error, but it's not fatal.
  457. */
  458. else {
  459. if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i)))
  460. return error;
  461. *stat = 1;
  462. return 0;
  463. }
  464. /*
  465. * We're now going to join "left" and "right" by moving all the stuff
  466. * in "right" to "left" and deleting "right".
  467. */
  468. if (level > 0) {
  469. /*
  470. * It's a non-leaf. Move keys and pointers.
  471. */
  472. lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur);
  473. lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur);
  474. rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
  475. rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
  476. #ifdef DEBUG
  477. for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
  478. if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level)))
  479. return error;
  480. }
  481. #endif
  482. memcpy(lkp, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*lkp));
  483. memcpy(lpp, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*lpp));
  484. xfs_alloc_log_keys(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1,
  485. be16_to_cpu(left->bb_numrecs) +
  486. be16_to_cpu(right->bb_numrecs));
  487. xfs_alloc_log_ptrs(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1,
  488. be16_to_cpu(left->bb_numrecs) +
  489. be16_to_cpu(right->bb_numrecs));
  490. } else {
  491. /*
  492. * It's a leaf. Move records.
  493. */
  494. lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur);
  495. rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
  496. memcpy(lrp, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*lrp));
  497. xfs_alloc_log_recs(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1,
  498. be16_to_cpu(left->bb_numrecs) +
  499. be16_to_cpu(right->bb_numrecs));
  500. }
  501. /*
  502. * If we joined with the left neighbor, set the buffer in the
  503. * cursor to the left block, and fix up the index.
  504. */
  505. if (bp != lbp) {
  506. xfs_btree_setbuf(cur, level, lbp);
  507. cur->bc_ptrs[level] += be16_to_cpu(left->bb_numrecs);
  508. }
  509. /*
  510. * If we joined with the right neighbor and there's a level above
  511. * us, increment the cursor at that level.
  512. */
  513. else if (level + 1 < cur->bc_nlevels &&
  514. (error = xfs_alloc_increment(cur, level + 1, &i)))
  515. return error;
  516. /*
  517. * Fix up the number of records in the surviving block.
  518. */
  519. be16_add(&left->bb_numrecs, be16_to_cpu(right->bb_numrecs));
  520. /*
  521. * Fix up the right block pointer in the surviving block, and log it.
  522. */
  523. left->bb_rightsib = right->bb_rightsib;
  524. xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
  525. /*
  526. * If there is a right sibling now, make it point to the
  527. * remaining block.
  528. */
  529. if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) {
  530. xfs_alloc_block_t *rrblock;
  531. xfs_buf_t *rrbp;
  532. if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
  533. cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0,
  534. &rrbp, XFS_ALLOC_BTREE_REF)))
  535. return error;
  536. rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp);
  537. if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
  538. return error;
  539. rrblock->bb_leftsib = cpu_to_be32(lbno);
  540. xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB);
  541. }
  542. /*
  543. * Free the deleting block by putting it on the freelist.
  544. */
  545. if ((error = xfs_alloc_put_freelist(cur->bc_tp, cur->bc_private.a.agbp,
  546. NULL, rbno)))
  547. return error;
  548. /*
  549. * Since blocks move to the free list without the coordination
  550. * used in xfs_bmap_finish, we can't allow block to be available
  551. * for reallocation and non-transaction writing (user data)
  552. * until we know that the transaction that moved it to the free
  553. * list is permanently on disk. We track the blocks by declaring
  554. * these blocks as "busy"; the busy list is maintained on a
  555. * per-ag basis and each transaction records which entries
  556. * should be removed when the iclog commits to disk. If a
  557. * busy block is allocated, the iclog is pushed up to the
  558. * LSN that freed the block.
  559. */
  560. xfs_alloc_mark_busy(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1);
  561. xfs_trans_agbtree_delta(cur->bc_tp, -1);
  562. /*
  563. * Adjust the current level's cursor so that we're left referring
  564. * to the right node, after we're done.
  565. * If this leaves the ptr value 0 our caller will fix it up.
  566. */
  567. if (level > 0)
  568. cur->bc_ptrs[level]--;
  569. /*
  570. * Return value means the next level up has something to do.
  571. */
  572. *stat = 2;
  573. return 0;
  574. error0:
  575. xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
  576. return error;
  577. }
  578. /*
  579. * Insert one record/level. Return information to the caller
  580. * allowing the next level up to proceed if necessary.
  581. */
  582. STATIC int /* error */
  583. xfs_alloc_insrec(
  584. xfs_btree_cur_t *cur, /* btree cursor */
  585. int level, /* level to insert record at */
  586. xfs_agblock_t *bnop, /* i/o: block number inserted */
  587. xfs_alloc_rec_t *recp, /* i/o: record data inserted */
  588. xfs_btree_cur_t **curp, /* output: new cursor replacing cur */
  589. int *stat) /* output: success/failure */
  590. {
  591. xfs_agf_t *agf; /* allocation group freelist header */
  592. xfs_alloc_block_t *block; /* btree block record/key lives in */
  593. xfs_buf_t *bp; /* buffer for block */
  594. int error; /* error return value */
  595. int i; /* loop index */
  596. xfs_alloc_key_t key; /* key value being inserted */
  597. xfs_alloc_key_t *kp; /* pointer to btree keys */
  598. xfs_agblock_t nbno; /* block number of allocated block */
  599. xfs_btree_cur_t *ncur; /* new cursor to be used at next lvl */
  600. xfs_alloc_key_t nkey; /* new key value, from split */
  601. xfs_alloc_rec_t nrec; /* new record value, for caller */
  602. int optr; /* old ptr value */
  603. xfs_alloc_ptr_t *pp; /* pointer to btree addresses */
  604. int ptr; /* index in btree block for this rec */
  605. xfs_alloc_rec_t *rp; /* pointer to btree records */
  606. ASSERT(be32_to_cpu(recp->ar_blockcount) > 0);
  607. /*
  608. * GCC doesn't understand the (arguably complex) control flow in
  609. * this function and complains about uninitialized structure fields
  610. * without this.
  611. */
  612. memset(&nrec, 0, sizeof(nrec));
  613. /*
  614. * If we made it to the root level, allocate a new root block
  615. * and we're done.
  616. */
  617. if (level >= cur->bc_nlevels) {
  618. XFS_STATS_INC(xs_abt_insrec);
  619. if ((error = xfs_alloc_newroot(cur, &i)))
  620. return error;
  621. *bnop = NULLAGBLOCK;
  622. *stat = i;
  623. return 0;
  624. }
  625. /*
  626. * Make a key out of the record data to be inserted, and save it.
  627. */
  628. key.ar_startblock = recp->ar_startblock;
  629. key.ar_blockcount = recp->ar_blockcount;
  630. optr = ptr = cur->bc_ptrs[level];
  631. /*
  632. * If we're off the left edge, return failure.
  633. */
  634. if (ptr == 0) {
  635. *stat = 0;
  636. return 0;
  637. }
  638. XFS_STATS_INC(xs_abt_insrec);
  639. /*
  640. * Get pointers to the btree buffer and block.
  641. */
  642. bp = cur->bc_bufs[level];
  643. block = XFS_BUF_TO_ALLOC_BLOCK(bp);
  644. #ifdef DEBUG
  645. if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
  646. return error;
  647. /*
  648. * Check that the new entry is being inserted in the right place.
  649. */
  650. if (ptr <= be16_to_cpu(block->bb_numrecs)) {
  651. if (level == 0) {
  652. rp = XFS_ALLOC_REC_ADDR(block, ptr, cur);
  653. xfs_btree_check_rec(cur->bc_btnum, recp, rp);
  654. } else {
  655. kp = XFS_ALLOC_KEY_ADDR(block, ptr, cur);
  656. xfs_btree_check_key(cur->bc_btnum, &key, kp);
  657. }
  658. }
  659. #endif
  660. nbno = NULLAGBLOCK;
  661. ncur = (xfs_btree_cur_t *)0;
  662. /*
  663. * If the block is full, we can't insert the new entry until we
  664. * make the block un-full.
  665. */
  666. if (be16_to_cpu(block->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
  667. /*
  668. * First, try shifting an entry to the right neighbor.
  669. */
  670. if ((error = xfs_alloc_rshift(cur, level, &i)))
  671. return error;
  672. if (i) {
  673. /* nothing */
  674. }
  675. /*
  676. * Next, try shifting an entry to the left neighbor.
  677. */
  678. else {
  679. if ((error = xfs_alloc_lshift(cur, level, &i)))
  680. return error;
  681. if (i)
  682. optr = ptr = cur->bc_ptrs[level];
  683. else {
  684. /*
  685. * Next, try splitting the current block in
  686. * half. If this works we have to re-set our
  687. * variables because we could be in a
  688. * different block now.
  689. */
  690. if ((error = xfs_alloc_split(cur, level, &nbno,
  691. &nkey, &ncur, &i)))
  692. return error;
  693. if (i) {
  694. bp = cur->bc_bufs[level];
  695. block = XFS_BUF_TO_ALLOC_BLOCK(bp);
  696. #ifdef DEBUG
  697. if ((error =
  698. xfs_btree_check_sblock(cur,
  699. block, level, bp)))
  700. return error;
  701. #endif
  702. ptr = cur->bc_ptrs[level];
  703. nrec.ar_startblock = nkey.ar_startblock;
  704. nrec.ar_blockcount = nkey.ar_blockcount;
  705. }
  706. /*
  707. * Otherwise the insert fails.
  708. */
  709. else {
  710. *stat = 0;
  711. return 0;
  712. }
  713. }
  714. }
  715. }
  716. /*
  717. * At this point we know there's room for our new entry in the block
  718. * we're pointing at.
  719. */
  720. if (level > 0) {
  721. /*
  722. * It's a non-leaf entry. Make a hole for the new data
  723. * in the key and ptr regions of the block.
  724. */
  725. kp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
  726. pp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
  727. #ifdef DEBUG
  728. for (i = be16_to_cpu(block->bb_numrecs); i >= ptr; i--) {
  729. if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level)))
  730. return error;
  731. }
  732. #endif
  733. memmove(&kp[ptr], &kp[ptr - 1],
  734. (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*kp));
  735. memmove(&pp[ptr], &pp[ptr - 1],
  736. (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*pp));
  737. #ifdef DEBUG
  738. if ((error = xfs_btree_check_sptr(cur, *bnop, level)))
  739. return error;
  740. #endif
  741. /*
  742. * Now stuff the new data in, bump numrecs and log the new data.
  743. */
  744. kp[ptr - 1] = key;
  745. pp[ptr - 1] = cpu_to_be32(*bnop);
  746. be16_add(&block->bb_numrecs, 1);
  747. xfs_alloc_log_keys(cur, bp, ptr, be16_to_cpu(block->bb_numrecs));
  748. xfs_alloc_log_ptrs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs));
  749. #ifdef DEBUG
  750. if (ptr < be16_to_cpu(block->bb_numrecs))
  751. xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1,
  752. kp + ptr);
  753. #endif
  754. } else {
  755. /*
  756. * It's a leaf entry. Make a hole for the new record.
  757. */
  758. rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
  759. memmove(&rp[ptr], &rp[ptr - 1],
  760. (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*rp));
  761. /*
  762. * Now stuff the new record in, bump numrecs
  763. * and log the new data.
  764. */
  765. rp[ptr - 1] = *recp; /* INT_: struct copy */
  766. be16_add(&block->bb_numrecs, 1);
  767. xfs_alloc_log_recs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs));
  768. #ifdef DEBUG
  769. if (ptr < be16_to_cpu(block->bb_numrecs))
  770. xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1,
  771. rp + ptr);
  772. #endif
  773. }
  774. /*
  775. * Log the new number of records in the btree header.
  776. */
  777. xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
  778. /*
  779. * If we inserted at the start of a block, update the parents' keys.
  780. */
  781. if (optr == 1 && (error = xfs_alloc_updkey(cur, &key, level + 1)))
  782. return error;
  783. /*
  784. * Look to see if the longest extent in the allocation group
  785. * needs to be updated.
  786. */
  787. agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
  788. if (level == 0 &&
  789. cur->bc_btnum == XFS_BTNUM_CNT &&
  790. be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
  791. be32_to_cpu(recp->ar_blockcount) > be32_to_cpu(agf->agf_longest)) {
  792. /*
  793. * If this is a leaf in the by-size btree and there
  794. * is no right sibling block and this block is bigger
  795. * than the previous longest block, update it.
  796. */
  797. agf->agf_longest = recp->ar_blockcount;
  798. cur->bc_mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest
  799. = be32_to_cpu(recp->ar_blockcount);
  800. xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
  801. XFS_AGF_LONGEST);
  802. }
  803. /*
  804. * Return the new block number, if any.
  805. * If there is one, give back a record value and a cursor too.
  806. */
  807. *bnop = nbno;
  808. if (nbno != NULLAGBLOCK) {
  809. *recp = nrec; /* INT_: struct copy */
  810. *curp = ncur; /* INT_: struct copy */
  811. }
  812. *stat = 1;
  813. return 0;
  814. }
  815. /*
  816. * Log header fields from a btree block.
  817. */
  818. STATIC void
  819. xfs_alloc_log_block(
  820. xfs_trans_t *tp, /* transaction pointer */
  821. xfs_buf_t *bp, /* buffer containing btree block */
  822. int fields) /* mask of fields: XFS_BB_... */
  823. {
  824. int first; /* first byte offset logged */
  825. int last; /* last byte offset logged */
  826. static const short offsets[] = { /* table of offsets */
  827. offsetof(xfs_alloc_block_t, bb_magic),
  828. offsetof(xfs_alloc_block_t, bb_level),
  829. offsetof(xfs_alloc_block_t, bb_numrecs),
  830. offsetof(xfs_alloc_block_t, bb_leftsib),
  831. offsetof(xfs_alloc_block_t, bb_rightsib),
  832. sizeof(xfs_alloc_block_t)
  833. };
  834. xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first, &last);
  835. xfs_trans_log_buf(tp, bp, first, last);
  836. }
  837. /*
  838. * Log keys from a btree block (nonleaf).
  839. */
  840. STATIC void
  841. xfs_alloc_log_keys(
  842. xfs_btree_cur_t *cur, /* btree cursor */
  843. xfs_buf_t *bp, /* buffer containing btree block */
  844. int kfirst, /* index of first key to log */
  845. int klast) /* index of last key to log */
  846. {
  847. xfs_alloc_block_t *block; /* btree block to log from */
  848. int first; /* first byte offset logged */
  849. xfs_alloc_key_t *kp; /* key pointer in btree block */
  850. int last; /* last byte offset logged */
  851. block = XFS_BUF_TO_ALLOC_BLOCK(bp);
  852. kp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
  853. first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block);
  854. last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block);
  855. xfs_trans_log_buf(cur->bc_tp, bp, first, last);
  856. }
  857. /*
  858. * Log block pointer fields from a btree block (nonleaf).
  859. */
  860. STATIC void
  861. xfs_alloc_log_ptrs(
  862. xfs_btree_cur_t *cur, /* btree cursor */
  863. xfs_buf_t *bp, /* buffer containing btree block */
  864. int pfirst, /* index of first pointer to log */
  865. int plast) /* index of last pointer to log */
  866. {
  867. xfs_alloc_block_t *block; /* btree block to log from */
  868. int first; /* first byte offset logged */
  869. int last; /* last byte offset logged */
  870. xfs_alloc_ptr_t *pp; /* block-pointer pointer in btree blk */
  871. block = XFS_BUF_TO_ALLOC_BLOCK(bp);
  872. pp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
  873. first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block);
  874. last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block);
  875. xfs_trans_log_buf(cur->bc_tp, bp, first, last);
  876. }
  877. /*
  878. * Log records from a btree block (leaf).
  879. */
  880. STATIC void
  881. xfs_alloc_log_recs(
  882. xfs_btree_cur_t *cur, /* btree cursor */
  883. xfs_buf_t *bp, /* buffer containing btree block */
  884. int rfirst, /* index of first record to log */
  885. int rlast) /* index of last record to log */
  886. {
  887. xfs_alloc_block_t *block; /* btree block to log from */
  888. int first; /* first byte offset logged */
  889. int last; /* last byte offset logged */
  890. xfs_alloc_rec_t *rp; /* record pointer for btree block */
  891. block = XFS_BUF_TO_ALLOC_BLOCK(bp);
  892. rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
  893. #ifdef DEBUG
  894. {
  895. xfs_agf_t *agf;
  896. xfs_alloc_rec_t *p;
  897. agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
  898. for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++)
  899. ASSERT(be32_to_cpu(p->ar_startblock) +
  900. be32_to_cpu(p->ar_blockcount) <=
  901. be32_to_cpu(agf->agf_length));
  902. }
  903. #endif
  904. first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block);
  905. last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block);
  906. xfs_trans_log_buf(cur->bc_tp, bp, first, last);
  907. }
  908. /*
  909. * Lookup the record. The cursor is made to point to it, based on dir.
  910. * Return 0 if can't find any such record, 1 for success.
  911. */
  912. STATIC int /* error */
  913. xfs_alloc_lookup(
  914. xfs_btree_cur_t *cur, /* btree cursor */
  915. xfs_lookup_t dir, /* <=, ==, or >= */
  916. int *stat) /* success/failure */
  917. {
  918. xfs_agblock_t agbno; /* a.g. relative btree block number */
  919. xfs_agnumber_t agno; /* allocation group number */
  920. xfs_alloc_block_t *block=NULL; /* current btree block */
  921. int diff; /* difference for the current key */
  922. int error; /* error return value */
  923. int keyno=0; /* current key number */
  924. int level; /* level in the btree */
  925. xfs_mount_t *mp; /* file system mount point */
  926. XFS_STATS_INC(xs_abt_lookup);
  927. /*
  928. * Get the allocation group header, and the root block number.
  929. */
  930. mp = cur->bc_mp;
  931. {
  932. xfs_agf_t *agf; /* a.g. freespace header */
  933. agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
  934. agno = be32_to_cpu(agf->agf_seqno);
  935. agbno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]);
  936. }
  937. /*
  938. * Iterate over each level in the btree, starting at the root.
  939. * For each level above the leaves, find the key we need, based
  940. * on the lookup record, then follow the corresponding block
  941. * pointer down to the next level.
  942. */
  943. for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
  944. xfs_buf_t *bp; /* buffer pointer for btree block */
  945. xfs_daddr_t d; /* disk address of btree block */
  946. /*
  947. * Get the disk address we're looking for.
  948. */
  949. d = XFS_AGB_TO_DADDR(mp, agno, agbno);
  950. /*
  951. * If the old buffer at this level is for a different block,
  952. * throw it away, otherwise just use it.
  953. */
  954. bp = cur->bc_bufs[level];
  955. if (bp && XFS_BUF_ADDR(bp) != d)
  956. bp = (xfs_buf_t *)0;
  957. if (!bp) {
  958. /*
  959. * Need to get a new buffer. Read it, then
  960. * set it in the cursor, releasing the old one.
  961. */
  962. if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, agno,
  963. agbno, 0, &bp, XFS_ALLOC_BTREE_REF)))
  964. return error;
  965. xfs_btree_setbuf(cur, level, bp);
  966. /*
  967. * Point to the btree block, now that we have the buffer
  968. */
  969. block = XFS_BUF_TO_ALLOC_BLOCK(bp);
  970. if ((error = xfs_btree_check_sblock(cur, block, level,
  971. bp)))
  972. return error;
  973. } else
  974. block = XFS_BUF_TO_ALLOC_BLOCK(bp);
  975. /*
  976. * If we already had a key match at a higher level, we know
  977. * we need to use the first entry in this block.
  978. */
  979. if (diff == 0)
  980. keyno = 1;
  981. /*
  982. * Otherwise we need to search this block. Do a binary search.
  983. */
  984. else {
  985. int high; /* high entry number */
  986. xfs_alloc_key_t *kkbase=NULL;/* base of keys in block */
  987. xfs_alloc_rec_t *krbase=NULL;/* base of records in block */
  988. int low; /* low entry number */
  989. /*
  990. * Get a pointer to keys or records.
  991. */
  992. if (level > 0)
  993. kkbase = XFS_ALLOC_KEY_ADDR(block, 1, cur);
  994. else
  995. krbase = XFS_ALLOC_REC_ADDR(block, 1, cur);
  996. /*
  997. * Set low and high entry numbers, 1-based.
  998. */
  999. low = 1;
  1000. if (!(high = be16_to_cpu(block->bb_numrecs))) {
  1001. /*
  1002. * If the block is empty, the tree must
  1003. * be an empty leaf.
  1004. */
  1005. ASSERT(level == 0 && cur->bc_nlevels == 1);
  1006. cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
  1007. *stat = 0;
  1008. return 0;
  1009. }
  1010. /*
  1011. * Binary search the block.
  1012. */
  1013. while (low <= high) {
  1014. xfs_extlen_t blockcount; /* key value */
  1015. xfs_agblock_t startblock; /* key value */
  1016. XFS_STATS_INC(xs_abt_compare);
  1017. /*
  1018. * keyno is average of low and high.
  1019. */
  1020. keyno = (low + high) >> 1;
  1021. /*
  1022. * Get startblock & blockcount.
  1023. */
  1024. if (level > 0) {
  1025. xfs_alloc_key_t *kkp;
  1026. kkp = kkbase + keyno - 1;
  1027. startblock = be32_to_cpu(kkp->ar_startblock);
  1028. blockcount = be32_to_cpu(kkp->ar_blockcount);
  1029. } else {
  1030. xfs_alloc_rec_t *krp;
  1031. krp = krbase + keyno - 1;
  1032. startblock = be32_to_cpu(krp->ar_startblock);
  1033. blockcount = be32_to_cpu(krp->ar_blockcount);
  1034. }
  1035. /*
  1036. * Compute difference to get next direction.
  1037. */
  1038. if (cur->bc_btnum == XFS_BTNUM_BNO)
  1039. diff = (int)startblock -
  1040. (int)cur->bc_rec.a.ar_startblock;
  1041. else if (!(diff = (int)blockcount -
  1042. (int)cur->bc_rec.a.ar_blockcount))
  1043. diff = (int)startblock -
  1044. (int)cur->bc_rec.a.ar_startblock;
  1045. /*
  1046. * Less than, move right.
  1047. */
  1048. if (diff < 0)
  1049. low = keyno + 1;
  1050. /*
  1051. * Greater than, move left.
  1052. */
  1053. else if (diff > 0)
  1054. high = keyno - 1;
  1055. /*
  1056. * Equal, we're done.
  1057. */
  1058. else
  1059. break;
  1060. }
  1061. }
  1062. /*
  1063. * If there are more levels, set up for the next level
  1064. * by getting the block number and filling in the cursor.
  1065. */
  1066. if (level > 0) {
  1067. /*
  1068. * If we moved left, need the previous key number,
  1069. * unless there isn't one.
  1070. */
  1071. if (diff > 0 && --keyno < 1)
  1072. keyno = 1;
  1073. agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, keyno, cur));
  1074. #ifdef DEBUG
  1075. if ((error = xfs_btree_check_sptr(cur, agbno, level)))
  1076. return error;
  1077. #endif
  1078. cur->bc_ptrs[level] = keyno;
  1079. }
  1080. }
  1081. /*
  1082. * Done with the search.
  1083. * See if we need to adjust the results.
  1084. */
  1085. if (dir != XFS_LOOKUP_LE && diff < 0) {
  1086. keyno++;
  1087. /*
  1088. * If ge search and we went off the end of the block, but it's
  1089. * not the last block, we're in the wrong block.
  1090. */
  1091. if (dir == XFS_LOOKUP_GE &&
  1092. keyno > be16_to_cpu(block->bb_numrecs) &&
  1093. be32_to_cpu(block->bb_rightsib) != NULLAGBLOCK) {
  1094. int i;
  1095. cur->bc_ptrs[0] = keyno;
  1096. if ((error = xfs_alloc_increment(cur, 0, &i)))
  1097. return error;
  1098. XFS_WANT_CORRUPTED_RETURN(i == 1);
  1099. *stat = 1;
  1100. return 0;
  1101. }
  1102. }
  1103. else if (dir == XFS_LOOKUP_LE && diff > 0)
  1104. keyno--;
  1105. cur->bc_ptrs[0] = keyno;
  1106. /*
  1107. * Return if we succeeded or not.
  1108. */
  1109. if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs))
  1110. *stat = 0;
  1111. else
  1112. *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0));
  1113. return 0;
  1114. }
  1115. /*
  1116. * Move 1 record left from cur/level if possible.
  1117. * Update cur to reflect the new path.
  1118. */
  1119. STATIC int /* error */
  1120. xfs_alloc_lshift(
  1121. xfs_btree_cur_t *cur, /* btree cursor */
  1122. int level, /* level to shift record on */
  1123. int *stat) /* success/failure */
  1124. {
  1125. int error; /* error return value */
  1126. #ifdef DEBUG
  1127. int i; /* loop index */
  1128. #endif
  1129. xfs_alloc_key_t key; /* key value for leaf level upward */
  1130. xfs_buf_t *lbp; /* buffer for left neighbor block */
  1131. xfs_alloc_block_t *left; /* left neighbor btree block */
  1132. int nrec; /* new number of left block entries */
  1133. xfs_buf_t *rbp; /* buffer for right (current) block */
  1134. xfs_alloc_block_t *right; /* right (current) btree block */
  1135. xfs_alloc_key_t *rkp=NULL; /* key pointer for right block */
  1136. xfs_alloc_ptr_t *rpp=NULL; /* address pointer for right block */
  1137. xfs_alloc_rec_t *rrp=NULL; /* record pointer for right block */
  1138. /*
  1139. * Set up variables for this block as "right".
  1140. */
  1141. rbp = cur->bc_bufs[level];
  1142. right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
  1143. #ifdef DEBUG
  1144. if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
  1145. return error;
  1146. #endif
  1147. /*
  1148. * If we've got no left sibling then we can't shift an entry left.
  1149. */
  1150. if (be32_to_cpu(right->bb_leftsib) == NULLAGBLOCK) {
  1151. *stat = 0;
  1152. return 0;
  1153. }
  1154. /*
  1155. * If the cursor entry is the one that would be moved, don't
  1156. * do it... it's too complicated.
  1157. */
  1158. if (cur->bc_ptrs[level] <= 1) {
  1159. *stat = 0;
  1160. return 0;
  1161. }
  1162. /*
  1163. * Set up the left neighbor as "left".
  1164. */
  1165. if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
  1166. cur->bc_private.a.agno, be32_to_cpu(right->bb_leftsib),
  1167. 0, &lbp, XFS_ALLOC_BTREE_REF)))
  1168. return error;
  1169. left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
  1170. if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
  1171. return error;
  1172. /*
  1173. * If it's full, it can't take another entry.
  1174. */
  1175. if (be16_to_cpu(left->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
  1176. *stat = 0;
  1177. return 0;
  1178. }
  1179. nrec = be16_to_cpu(left->bb_numrecs) + 1;
  1180. /*
  1181. * If non-leaf, copy a key and a ptr to the left block.
  1182. */
  1183. if (level > 0) {
  1184. xfs_alloc_key_t *lkp; /* key pointer for left block */
  1185. xfs_alloc_ptr_t *lpp; /* address pointer for left block */
  1186. lkp = XFS_ALLOC_KEY_ADDR(left, nrec, cur);
  1187. rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
  1188. *lkp = *rkp;
  1189. xfs_alloc_log_keys(cur, lbp, nrec, nrec);
  1190. lpp = XFS_ALLOC_PTR_ADDR(left, nrec, cur);
  1191. rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
  1192. #ifdef DEBUG
  1193. if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level)))
  1194. return error;
  1195. #endif
  1196. *lpp = *rpp; /* INT_: copy */
  1197. xfs_alloc_log_ptrs(cur, lbp, nrec, nrec);
  1198. xfs_btree_check_key(cur->bc_btnum, lkp - 1, lkp);
  1199. }
  1200. /*
  1201. * If leaf, copy a record to the left block.
  1202. */
  1203. else {
  1204. xfs_alloc_rec_t *lrp; /* record pointer for left block */
  1205. lrp = XFS_ALLOC_REC_ADDR(left, nrec, cur);
  1206. rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
  1207. *lrp = *rrp;
  1208. xfs_alloc_log_recs(cur, lbp, nrec, nrec);
  1209. xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp);
  1210. }
  1211. /*
  1212. * Bump and log left's numrecs, decrement and log right's numrecs.
  1213. */
  1214. be16_add(&left->bb_numrecs, 1);
  1215. xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
  1216. be16_add(&right->bb_numrecs, -1);
  1217. xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
  1218. /*
  1219. * Slide the contents of right down one entry.
  1220. */
  1221. if (level > 0) {
  1222. #ifdef DEBUG
  1223. for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
  1224. if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i + 1]),
  1225. level)))
  1226. return error;
  1227. }
  1228. #endif
  1229. memmove(rkp, rkp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
  1230. memmove(rpp, rpp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
  1231. xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
  1232. xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
  1233. } else {
  1234. memmove(rrp, rrp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
  1235. xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
  1236. key.ar_startblock = rrp->ar_startblock;
  1237. key.ar_blockcount = rrp->ar_blockcount;
  1238. rkp = &key;
  1239. }
  1240. /*
  1241. * Update the parent key values of right.
  1242. */
  1243. if ((error = xfs_alloc_updkey(cur, rkp, level + 1)))
  1244. return error;
  1245. /*
  1246. * Slide the cursor value left one.
  1247. */
  1248. cur->bc_ptrs[level]--;
  1249. *stat = 1;
  1250. return 0;
  1251. }
  1252. /*
  1253. * Allocate a new root block, fill it in.
  1254. */
  1255. STATIC int /* error */
  1256. xfs_alloc_newroot(
  1257. xfs_btree_cur_t *cur, /* btree cursor */
  1258. int *stat) /* success/failure */
  1259. {
  1260. int error; /* error return value */
  1261. xfs_agblock_t lbno; /* left block number */
  1262. xfs_buf_t *lbp; /* left btree buffer */
  1263. xfs_alloc_block_t *left; /* left btree block */
  1264. xfs_mount_t *mp; /* mount structure */
  1265. xfs_agblock_t nbno; /* new block number */
  1266. xfs_buf_t *nbp; /* new (root) buffer */
  1267. xfs_alloc_block_t *new; /* new (root) btree block */
  1268. int nptr; /* new value for key index, 1 or 2 */
  1269. xfs_agblock_t rbno; /* right block number */
  1270. xfs_buf_t *rbp; /* right btree buffer */
  1271. xfs_alloc_block_t *right; /* right btree block */
  1272. mp = cur->bc_mp;
  1273. ASSERT(cur->bc_nlevels < XFS_AG_MAXLEVELS(mp));
  1274. /*
  1275. * Get a buffer from the freelist blocks, for the new root.
  1276. */
  1277. if ((error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
  1278. &nbno)))
  1279. return error;
  1280. /*
  1281. * None available, we fail.
  1282. */
  1283. if (nbno == NULLAGBLOCK) {
  1284. *stat = 0;
  1285. return 0;
  1286. }
  1287. xfs_trans_agbtree_delta(cur->bc_tp, 1);
  1288. nbp = xfs_btree_get_bufs(mp, cur->bc_tp, cur->bc_private.a.agno, nbno,
  1289. 0);
  1290. new = XFS_BUF_TO_ALLOC_BLOCK(nbp);
  1291. /*
  1292. * Set the root data in the a.g. freespace structure.
  1293. */
  1294. {
  1295. xfs_agf_t *agf; /* a.g. freespace header */
  1296. xfs_agnumber_t seqno;
  1297. agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
  1298. agf->agf_roots[cur->bc_btnum] = cpu_to_be32(nbno);
  1299. be32_add(&agf->agf_levels[cur->bc_btnum], 1);
  1300. seqno = be32_to_cpu(agf->agf_seqno);
  1301. mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++;
  1302. xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
  1303. XFS_AGF_ROOTS | XFS_AGF_LEVELS);
  1304. }
  1305. /*
  1306. * At the previous root level there are now two blocks: the old
  1307. * root, and the new block generated when it was split.
  1308. * We don't know which one the cursor is pointing at, so we
  1309. * set up variables "left" and "right" for each case.
  1310. */
  1311. lbp = cur->bc_bufs[cur->bc_nlevels - 1];
  1312. left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
  1313. #ifdef DEBUG
  1314. if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp)))
  1315. return error;
  1316. #endif
  1317. if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) {
  1318. /*
  1319. * Our block is left, pick up the right block.
  1320. */
  1321. lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp));
  1322. rbno = be32_to_cpu(left->bb_rightsib);
  1323. if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
  1324. cur->bc_private.a.agno, rbno, 0, &rbp,
  1325. XFS_ALLOC_BTREE_REF)))
  1326. return error;
  1327. right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
  1328. if ((error = xfs_btree_check_sblock(cur, right,
  1329. cur->bc_nlevels - 1, rbp)))
  1330. return error;
  1331. nptr = 1;
  1332. } else {
  1333. /*
  1334. * Our block is right, pick up the left block.
  1335. */
  1336. rbp = lbp;
  1337. right = left;
  1338. rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp));
  1339. lbno = be32_to_cpu(right->bb_leftsib);
  1340. if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
  1341. cur->bc_private.a.agno, lbno, 0, &lbp,
  1342. XFS_ALLOC_BTREE_REF)))
  1343. return error;
  1344. left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
  1345. if ((error = xfs_btree_check_sblock(cur, left,
  1346. cur->bc_nlevels - 1, lbp)))
  1347. return error;
  1348. nptr = 2;
  1349. }
  1350. /*
  1351. * Fill in the new block's btree header and log it.
  1352. */
  1353. new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
  1354. new->bb_level = cpu_to_be16(cur->bc_nlevels);
  1355. new->bb_numrecs = cpu_to_be16(2);
  1356. new->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
  1357. new->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
  1358. xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS);
  1359. ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK);
  1360. /*
  1361. * Fill in the key data in the new root.
  1362. */
  1363. {
  1364. xfs_alloc_key_t *kp; /* btree key pointer */
  1365. kp = XFS_ALLOC_KEY_ADDR(new, 1, cur);
  1366. if (be16_to_cpu(left->bb_level) > 0) {
  1367. kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur); /* INT_: structure copy */
  1368. kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);/* INT_: structure copy */
  1369. } else {
  1370. xfs_alloc_rec_t *rp; /* btree record pointer */
  1371. rp = XFS_ALLOC_REC_ADDR(left, 1, cur);
  1372. kp[0].ar_startblock = rp->ar_startblock;
  1373. kp[0].ar_blockcount = rp->ar_blockcount;
  1374. rp = XFS_ALLOC_REC_ADDR(right, 1, cur);
  1375. kp[1].ar_startblock = rp->ar_startblock;
  1376. kp[1].ar_blockcount = rp->ar_blockcount;
  1377. }
  1378. }
  1379. xfs_alloc_log_keys(cur, nbp, 1, 2);
  1380. /*
  1381. * Fill in the pointer data in the new root.
  1382. */
  1383. {
  1384. xfs_alloc_ptr_t *pp; /* btree address pointer */
  1385. pp = XFS_ALLOC_PTR_ADDR(new, 1, cur);
  1386. pp[0] = cpu_to_be32(lbno);
  1387. pp[1] = cpu_to_be32(rbno);
  1388. }
  1389. xfs_alloc_log_ptrs(cur, nbp, 1, 2);
  1390. /*
  1391. * Fix up the cursor.
  1392. */
  1393. xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
  1394. cur->bc_ptrs[cur->bc_nlevels] = nptr;
  1395. cur->bc_nlevels++;
  1396. *stat = 1;
  1397. return 0;
  1398. }
  1399. /*
  1400. * Move 1 record right from cur/level if possible.
  1401. * Update cur to reflect the new path.
  1402. */
  1403. STATIC int /* error */
  1404. xfs_alloc_rshift(
  1405. xfs_btree_cur_t *cur, /* btree cursor */
  1406. int level, /* level to shift record on */
  1407. int *stat) /* success/failure */
  1408. {
  1409. int error; /* error return value */
  1410. int i; /* loop index */
  1411. xfs_alloc_key_t key; /* key value for leaf level upward */
  1412. xfs_buf_t *lbp; /* buffer for left (current) block */
  1413. xfs_alloc_block_t *left; /* left (current) btree block */
  1414. xfs_buf_t *rbp; /* buffer for right neighbor block */
  1415. xfs_alloc_block_t *right; /* right neighbor btree block */
  1416. xfs_alloc_key_t *rkp; /* key pointer for right block */
  1417. xfs_btree_cur_t *tcur; /* temporary cursor */
  1418. /*
  1419. * Set up variables for this block as "left".
  1420. */
  1421. lbp = cur->bc_bufs[level];
  1422. left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
  1423. #ifdef DEBUG
  1424. if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
  1425. return error;
  1426. #endif
  1427. /*
  1428. * If we've got no right sibling then we can't shift an entry right.
  1429. */
  1430. if (be32_to_cpu(left->bb_rightsib) == NULLAGBLOCK) {
  1431. *stat = 0;
  1432. return 0;
  1433. }
  1434. /*
  1435. * If the cursor entry is the one that would be moved, don't
  1436. * do it... it's too complicated.
  1437. */
  1438. if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) {
  1439. *stat = 0;
  1440. return 0;
  1441. }
  1442. /*
  1443. * Set up the right neighbor as "right".
  1444. */
  1445. if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
  1446. cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib),
  1447. 0, &rbp, XFS_ALLOC_BTREE_REF)))
  1448. return error;
  1449. right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
  1450. if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
  1451. return error;
  1452. /*
  1453. * If it's full, it can't take another entry.
  1454. */
  1455. if (be16_to_cpu(right->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
  1456. *stat = 0;
  1457. return 0;
  1458. }
  1459. /*
  1460. * Make a hole at the start of the right neighbor block, then
  1461. * copy the last left block entry to the hole.
  1462. */
  1463. if (level > 0) {
  1464. xfs_alloc_key_t *lkp; /* key pointer for left block */
  1465. xfs_alloc_ptr_t *lpp; /* address pointer for left block */
  1466. xfs_alloc_ptr_t *rpp; /* address pointer for right block */
  1467. lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
  1468. lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
  1469. rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
  1470. rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
  1471. #ifdef DEBUG
  1472. for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) {
  1473. if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level)))
  1474. return error;
  1475. }
  1476. #endif
  1477. memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
  1478. memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
  1479. #ifdef DEBUG
  1480. if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level)))
  1481. return error;
  1482. #endif
  1483. *rkp = *lkp; /* INT_: copy */
  1484. *rpp = *lpp; /* INT_: copy */
  1485. xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
  1486. xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
  1487. xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1);
  1488. } else {
  1489. xfs_alloc_rec_t *lrp; /* record pointer for left block */
  1490. xfs_alloc_rec_t *rrp; /* record pointer for right block */
  1491. lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
  1492. rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
  1493. memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
  1494. *rrp = *lrp;
  1495. xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
  1496. key.ar_startblock = rrp->ar_startblock;
  1497. key.ar_blockcount = rrp->ar_blockcount;
  1498. rkp = &key;
  1499. xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1);
  1500. }
  1501. /*
  1502. * Decrement and log left's numrecs, bump and log right's numrecs.
  1503. */
  1504. be16_add(&left->bb_numrecs, -1);
  1505. xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
  1506. be16_add(&right->bb_numrecs, 1);
  1507. xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
  1508. /*
  1509. * Using a temporary cursor, update the parent key values of the
  1510. * block on the right.
  1511. */
  1512. if ((error = xfs_btree_dup_cursor(cur, &tcur)))
  1513. return error;
  1514. i = xfs_btree_lastrec(tcur, level);
  1515. XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
  1516. if ((error = xfs_alloc_increment(tcur, level, &i)) ||
  1517. (error = xfs_alloc_updkey(tcur, rkp, level + 1)))
  1518. goto error0;
  1519. xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
  1520. *stat = 1;
  1521. return 0;
  1522. error0:
  1523. xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
  1524. return error;
  1525. }
  1526. /*
  1527. * Split cur/level block in half.
  1528. * Return new block number and its first record (to be inserted into parent).
  1529. */
  1530. STATIC int /* error */
  1531. xfs_alloc_split(
  1532. xfs_btree_cur_t *cur, /* btree cursor */
  1533. int level, /* level to split */
  1534. xfs_agblock_t *bnop, /* output: block number allocated */
  1535. xfs_alloc_key_t *keyp, /* output: first key of new block */
  1536. xfs_btree_cur_t **curp, /* output: new cursor */
  1537. int *stat) /* success/failure */
  1538. {
  1539. int error; /* error return value */
  1540. int i; /* loop index/record number */
  1541. xfs_agblock_t lbno; /* left (current) block number */
  1542. xfs_buf_t *lbp; /* buffer for left block */
  1543. xfs_alloc_block_t *left; /* left (current) btree block */
  1544. xfs_agblock_t rbno; /* right (new) block number */
  1545. xfs_buf_t *rbp; /* buffer for right block */
  1546. xfs_alloc_block_t *right; /* right (new) btree block */
  1547. /*
  1548. * Allocate the new block from the freelist.
  1549. * If we can't do it, we're toast. Give up.
  1550. */
  1551. if ((error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
  1552. &rbno)))
  1553. return error;
  1554. if (rbno == NULLAGBLOCK) {
  1555. *stat = 0;
  1556. return 0;
  1557. }
  1558. xfs_trans_agbtree_delta(cur->bc_tp, 1);
  1559. rbp = xfs_btree_get_bufs(cur->bc_mp, cur->bc_tp, cur->bc_private.a.agno,
  1560. rbno, 0);
  1561. /*
  1562. * Set up the new block as "right".
  1563. */
  1564. right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
  1565. /*
  1566. * "Left" is the current (according to the cursor) block.
  1567. */
  1568. lbp = cur->bc_bufs[level];
  1569. left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
  1570. #ifdef DEBUG
  1571. if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
  1572. return error;
  1573. #endif
  1574. /*
  1575. * Fill in the btree header for the new block.
  1576. */
  1577. right->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
  1578. right->bb_level = left->bb_level;
  1579. right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2);
  1580. /*
  1581. * Make sure that if there's an odd number of entries now, that
  1582. * each new block will have the same number of entries.
  1583. */
  1584. if ((be16_to_cpu(left->bb_numrecs) & 1) &&
  1585. cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
  1586. be16_add(&right->bb_numrecs, 1);
  1587. i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
  1588. /*
  1589. * For non-leaf blocks, copy keys and addresses over to the new block.
  1590. */
  1591. if (level > 0) {
  1592. xfs_alloc_key_t *lkp; /* left btree key pointer */
  1593. xfs_alloc_ptr_t *lpp; /* left btree address pointer */
  1594. xfs_alloc_key_t *rkp; /* right btree key pointer */
  1595. xfs_alloc_ptr_t *rpp; /* right btree address pointer */
  1596. lkp = XFS_ALLOC_KEY_ADDR(left, i, cur);
  1597. lpp = XFS_ALLOC_PTR_ADDR(left, i, cur);
  1598. rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
  1599. rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
  1600. #ifdef DEBUG
  1601. for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
  1602. if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level)))
  1603. return error;
  1604. }
  1605. #endif
  1606. memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
  1607. memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
  1608. xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
  1609. xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
  1610. *keyp = *rkp;
  1611. }
  1612. /*
  1613. * For leaf blocks, copy records over to the new block.
  1614. */
  1615. else {
  1616. xfs_alloc_rec_t *lrp; /* left btree record pointer */
  1617. xfs_alloc_rec_t *rrp; /* right btree record pointer */
  1618. lrp = XFS_ALLOC_REC_ADDR(left, i, cur);
  1619. rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
  1620. memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
  1621. xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
  1622. keyp->ar_startblock = rrp->ar_startblock;
  1623. keyp->ar_blockcount = rrp->ar_blockcount;
  1624. }
  1625. /*
  1626. * Find the left block number by looking in the buffer.
  1627. * Adjust numrecs, sibling pointers.
  1628. */
  1629. lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp));
  1630. be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
  1631. right->bb_rightsib = left->bb_rightsib;
  1632. left->bb_rightsib = cpu_to_be32(rbno);
  1633. right->bb_leftsib = cpu_to_be32(lbno);
  1634. xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_ALL_BITS);
  1635. xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
  1636. /*
  1637. * If there's a block to the new block's right, make that block
  1638. * point back to right instead of to left.
  1639. */
  1640. if (be32_to_cpu(right->bb_rightsib) != NULLAGBLOCK) {
  1641. xfs_alloc_block_t *rrblock; /* rr btree block */
  1642. xfs_buf_t *rrbp; /* buffer for rrblock */
  1643. if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
  1644. cur->bc_private.a.agno, be32_to_cpu(right->bb_rightsib), 0,
  1645. &rrbp, XFS_ALLOC_BTREE_REF)))
  1646. return error;
  1647. rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp);
  1648. if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
  1649. return error;
  1650. rrblock->bb_leftsib = cpu_to_be32(rbno);
  1651. xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB);
  1652. }
  1653. /*
  1654. * If the cursor is really in the right block, move it there.
  1655. * If it's just pointing past the last entry in left, then we'll
  1656. * insert there, so don't change anything in that case.
  1657. */
  1658. if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) {
  1659. xfs_btree_setbuf(cur, level, rbp);
  1660. cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs);
  1661. }
  1662. /*
  1663. * If there are more levels, we'll need another cursor which refers to
  1664. * the right block, no matter where this cursor was.
  1665. */
  1666. if (level + 1 < cur->bc_nlevels) {
  1667. if ((error = xfs_btree_dup_cursor(cur, curp)))
  1668. return error;
  1669. (*curp)->bc_ptrs[level + 1]++;
  1670. }
  1671. *bnop = rbno;
  1672. *stat = 1;
  1673. return 0;
  1674. }
  1675. /*
  1676. * Update keys at all levels from here to the root along the cursor's path.
  1677. */
  1678. STATIC int /* error */
  1679. xfs_alloc_updkey(
  1680. xfs_btree_cur_t *cur, /* btree cursor */
  1681. xfs_alloc_key_t *keyp, /* new key value to update to */
  1682. int level) /* starting level for update */
  1683. {
  1684. int ptr; /* index of key in block */
  1685. /*
  1686. * Go up the tree from this level toward the root.
  1687. * At each level, update the key value to the value input.
  1688. * Stop when we reach a level where the cursor isn't pointing
  1689. * at the first entry in the block.
  1690. */
  1691. for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
  1692. xfs_alloc_block_t *block; /* btree block */
  1693. xfs_buf_t *bp; /* buffer for block */
  1694. #ifdef DEBUG
  1695. int error; /* error return value */
  1696. #endif
  1697. xfs_alloc_key_t *kp; /* ptr to btree block keys */
  1698. bp = cur->bc_bufs[level];
  1699. block = XFS_BUF_TO_ALLOC_BLOCK(bp);
  1700. #ifdef DEBUG
  1701. if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
  1702. return error;
  1703. #endif
  1704. ptr = cur->bc_ptrs[level];
  1705. kp = XFS_ALLOC_KEY_ADDR(block, ptr, cur);
  1706. *kp = *keyp;
  1707. xfs_alloc_log_keys(cur, bp, ptr, ptr);
  1708. }
  1709. return 0;
  1710. }
  1711. /*
  1712. * Externally visible routines.
  1713. */
  1714. /*
  1715. * Decrement cursor by one record at the level.
  1716. * For nonzero levels the leaf-ward information is untouched.
  1717. */
  1718. int /* error */
  1719. xfs_alloc_decrement(
  1720. xfs_btree_cur_t *cur, /* btree cursor */
  1721. int level, /* level in btree, 0 is leaf */
  1722. int *stat) /* success/failure */
  1723. {
  1724. xfs_alloc_block_t *block; /* btree block */
  1725. int error; /* error return value */
  1726. int lev; /* btree level */
  1727. ASSERT(level < cur->bc_nlevels);
  1728. /*
  1729. * Read-ahead to the left at this level.
  1730. */
  1731. xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);
  1732. /*
  1733. * Decrement the ptr at this level. If we're still in the block
  1734. * then we're done.
  1735. */
  1736. if (--cur->bc_ptrs[level] > 0) {
  1737. *stat = 1;
  1738. return 0;
  1739. }
  1740. /*
  1741. * Get a pointer to the btree block.
  1742. */
  1743. block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[level]);
  1744. #ifdef DEBUG
  1745. if ((error = xfs_btree_check_sblock(cur, block, level,
  1746. cur->bc_bufs[level])))
  1747. return error;
  1748. #endif
  1749. /*
  1750. * If we just went off the left edge of the tree, return failure.
  1751. */
  1752. if (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK) {
  1753. *stat = 0;
  1754. return 0;
  1755. }
  1756. /*
  1757. * March up the tree decrementing pointers.
  1758. * Stop when we don't go off the left edge of a block.
  1759. */
  1760. for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
  1761. if (--cur->bc_ptrs[lev] > 0)
  1762. break;
  1763. /*
  1764. * Read-ahead the left block, we're going to read it
  1765. * in the next loop.
  1766. */
  1767. xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
  1768. }
  1769. /*
  1770. * If we went off the root then we are seriously confused.
  1771. */
  1772. ASSERT(lev < cur->bc_nlevels);
  1773. /*
  1774. * Now walk back down the tree, fixing up the cursor's buffer
  1775. * pointers and key numbers.
  1776. */
  1777. for (block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[lev]); lev > level; ) {
  1778. xfs_agblock_t agbno; /* block number of btree block */
  1779. xfs_buf_t *bp; /* buffer pointer for block */
  1780. agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur));
  1781. if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
  1782. cur->bc_private.a.agno, agbno, 0, &bp,
  1783. XFS_ALLOC_BTREE_REF)))
  1784. return error;
  1785. lev--;
  1786. xfs_btree_setbuf(cur, lev, bp);
  1787. block = XFS_BUF_TO_ALLOC_BLOCK(bp);
  1788. if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
  1789. return error;
  1790. cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs);
  1791. }
  1792. *stat = 1;
  1793. return 0;
  1794. }
  1795. /*
  1796. * Delete the record pointed to by cur.
  1797. * The cursor refers to the place where the record was (could be inserted)
  1798. * when the operation returns.
  1799. */
  1800. int /* error */
  1801. xfs_alloc_delete(
  1802. xfs_btree_cur_t *cur, /* btree cursor */
  1803. int *stat) /* success/failure */
  1804. {
  1805. int error; /* error return value */
  1806. int i; /* result code */
  1807. int level; /* btree level */
  1808. /*
  1809. * Go up the tree, starting at leaf level.
  1810. * If 2 is returned then a join was done; go to the next level.
  1811. * Otherwise we are done.
  1812. */
  1813. for (level = 0, i = 2; i == 2; level++) {
  1814. if ((error = xfs_alloc_delrec(cur, level, &i)))
  1815. return error;
  1816. }
  1817. if (i == 0) {
  1818. for (level = 1; level < cur->bc_nlevels; level++) {
  1819. if (cur->bc_ptrs[level] == 0) {
  1820. if ((error = xfs_alloc_decrement(cur, level, &i)))
  1821. return error;
  1822. break;
  1823. }
  1824. }
  1825. }
  1826. *stat = i;
  1827. return 0;
  1828. }
  1829. /*
  1830. * Get the data from the pointed-to record.
  1831. */
  1832. int /* error */
  1833. xfs_alloc_get_rec(
  1834. xfs_btree_cur_t *cur, /* btree cursor */
  1835. xfs_agblock_t *bno, /* output: starting block of extent */
  1836. xfs_extlen_t *len, /* output: length of extent */
  1837. int *stat) /* output: success/failure */
  1838. {
  1839. xfs_alloc_block_t *block; /* btree block */
  1840. #ifdef DEBUG
  1841. int error; /* error return value */
  1842. #endif
  1843. int ptr; /* record number */
  1844. ptr = cur->bc_ptrs[0];
  1845. block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]);
  1846. #ifdef DEBUG
  1847. if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0])))
  1848. return error;
  1849. #endif
  1850. /*
  1851. * Off the right end or left end, return failure.
  1852. */
  1853. if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) {
  1854. *stat = 0;
  1855. return 0;
  1856. }
  1857. /*
  1858. * Point to the record and extract its data.
  1859. */
  1860. {
  1861. xfs_alloc_rec_t *rec; /* record data */
  1862. rec = XFS_ALLOC_REC_ADDR(block, ptr, cur);
  1863. *bno = be32_to_cpu(rec->ar_startblock);
  1864. *len = be32_to_cpu(rec->ar_blockcount);
  1865. }
  1866. *stat = 1;
  1867. return 0;
  1868. }
  1869. /*
  1870. * Increment cursor by one record at the level.
  1871. * For nonzero levels the leaf-ward information is untouched.
  1872. */
  1873. int /* error */
  1874. xfs_alloc_increment(
  1875. xfs_btree_cur_t *cur, /* btree cursor */
  1876. int level, /* level in btree, 0 is leaf */
  1877. int *stat) /* success/failure */
  1878. {
  1879. xfs_alloc_block_t *block; /* btree block */
  1880. xfs_buf_t *bp; /* tree block buffer */
  1881. int error; /* error return value */
  1882. int lev; /* btree level */
  1883. ASSERT(level < cur->bc_nlevels);
  1884. /*
  1885. * Read-ahead to the right at this level.
  1886. */
  1887. xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
  1888. /*
  1889. * Get a pointer to the btree block.
  1890. */
  1891. bp = cur->bc_bufs[level];
  1892. block = XFS_BUF_TO_ALLOC_BLOCK(bp);
  1893. #ifdef DEBUG
  1894. if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
  1895. return error;
  1896. #endif
  1897. /*
  1898. * Increment the ptr at this level. If we're still in the block
  1899. * then we're done.
  1900. */
  1901. if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) {
  1902. *stat = 1;
  1903. return 0;
  1904. }
  1905. /*
  1906. * If we just went off the right edge of the tree, return failure.
  1907. */
  1908. if (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK) {
  1909. *stat = 0;
  1910. return 0;
  1911. }
  1912. /*
  1913. * March up the tree incrementing pointers.
  1914. * Stop when we don't go off the right edge of a block.
  1915. */
  1916. for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
  1917. bp = cur->bc_bufs[lev];
  1918. block = XFS_BUF_TO_ALLOC_BLOCK(bp);
  1919. #ifdef DEBUG
  1920. if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
  1921. return error;
  1922. #endif
  1923. if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs))
  1924. break;
  1925. /*
  1926. * Read-ahead the right block, we're going to read it
  1927. * in the next loop.
  1928. */
  1929. xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
  1930. }
  1931. /*
  1932. * If we went off the root then we are seriously confused.
  1933. */
  1934. ASSERT(lev < cur->bc_nlevels);
  1935. /*
  1936. * Now walk back down the tree, fixing up the cursor's buffer
  1937. * pointers and key numbers.
  1938. */
  1939. for (bp = cur->bc_bufs[lev], block = XFS_BUF_TO_ALLOC_BLOCK(bp);
  1940. lev > level; ) {
  1941. xfs_agblock_t agbno; /* block number of btree block */
  1942. agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur));
  1943. if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
  1944. cur->bc_private.a.agno, agbno, 0, &bp,
  1945. XFS_ALLOC_BTREE_REF)))
  1946. return error;
  1947. lev--;
  1948. xfs_btree_setbuf(cur, lev, bp);
  1949. block = XFS_BUF_TO_ALLOC_BLOCK(bp);
  1950. if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
  1951. return error;
  1952. cur->bc_ptrs[lev] = 1;
  1953. }
  1954. *stat = 1;
  1955. return 0;
  1956. }
  1957. /*
  1958. * Insert the current record at the point referenced by cur.
  1959. * The cursor may be inconsistent on return if splits have been done.
  1960. */
  1961. int /* error */
  1962. xfs_alloc_insert(
  1963. xfs_btree_cur_t *cur, /* btree cursor */
  1964. int *stat) /* success/failure */
  1965. {
  1966. int error; /* error return value */
  1967. int i; /* result value, 0 for failure */
  1968. int level; /* current level number in btree */
  1969. xfs_agblock_t nbno; /* new block number (split result) */
  1970. xfs_btree_cur_t *ncur; /* new cursor (split result) */
  1971. xfs_alloc_rec_t nrec; /* record being inserted this level */
  1972. xfs_btree_cur_t *pcur; /* previous level's cursor */
  1973. level = 0;
  1974. nbno = NULLAGBLOCK;
  1975. nrec.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
  1976. nrec.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
  1977. ncur = (xfs_btree_cur_t *)0;
  1978. pcur = cur;
  1979. /*
  1980. * Loop going up the tree, starting at the leaf level.
  1981. * Stop when we don't get a split block, that must mean that
  1982. * the insert is finished with this level.
  1983. */
  1984. do {
  1985. /*
  1986. * Insert nrec/nbno into this level of the tree.
  1987. * Note if we fail, nbno will be null.
  1988. */
  1989. if ((error = xfs_alloc_insrec(pcur, level++, &nbno, &nrec, &ncur,
  1990. &i))) {
  1991. if (pcur != cur)
  1992. xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
  1993. return error;
  1994. }
  1995. /*
  1996. * See if the cursor we just used is trash.
  1997. * Can't trash the caller's cursor, but otherwise we should
  1998. * if ncur is a new cursor or we're about to be done.
  1999. */
  2000. if (pcur != cur && (ncur || nbno == NULLAGBLOCK)) {
  2001. cur->bc_nlevels = pcur->bc_nlevels;
  2002. xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
  2003. }
  2004. /*
  2005. * If we got a new cursor, switch to it.
  2006. */
  2007. if (ncur) {
  2008. pcur = ncur;
  2009. ncur = (xfs_btree_cur_t *)0;
  2010. }
  2011. } while (nbno != NULLAGBLOCK);
  2012. *stat = i;
  2013. return 0;
  2014. }
  2015. /*
  2016. * Lookup the record equal to [bno, len] in the btree given by cur.
  2017. */
  2018. int /* error */
  2019. xfs_alloc_lookup_eq(
  2020. xfs_btree_cur_t *cur, /* btree cursor */
  2021. xfs_agblock_t bno, /* starting block of extent */
  2022. xfs_extlen_t len, /* length of extent */
  2023. int *stat) /* success/failure */
  2024. {
  2025. cur->bc_rec.a.ar_startblock = bno;
  2026. cur->bc_rec.a.ar_blockcount = len;
  2027. return xfs_alloc_lookup(cur, XFS_LOOKUP_EQ, stat);
  2028. }
  2029. /*
  2030. * Lookup the first record greater than or equal to [bno, len]
  2031. * in the btree given by cur.
  2032. */
  2033. int /* error */
  2034. xfs_alloc_lookup_ge(
  2035. xfs_btree_cur_t *cur, /* btree cursor */
  2036. xfs_agblock_t bno, /* starting block of extent */
  2037. xfs_extlen_t len, /* length of extent */
  2038. int *stat) /* success/failure */
  2039. {
  2040. cur->bc_rec.a.ar_startblock = bno;
  2041. cur->bc_rec.a.ar_blockcount = len;
  2042. return xfs_alloc_lookup(cur, XFS_LOOKUP_GE, stat);
  2043. }
  2044. /*
  2045. * Lookup the first record less than or equal to [bno, len]
  2046. * in the btree given by cur.
  2047. */
  2048. int /* error */
  2049. xfs_alloc_lookup_le(
  2050. xfs_btree_cur_t *cur, /* btree cursor */
  2051. xfs_agblock_t bno, /* starting block of extent */
  2052. xfs_extlen_t len, /* length of extent */
  2053. int *stat) /* success/failure */
  2054. {
  2055. cur->bc_rec.a.ar_startblock = bno;
  2056. cur->bc_rec.a.ar_blockcount = len;
  2057. return xfs_alloc_lookup(cur, XFS_LOOKUP_LE, stat);
  2058. }
  2059. /*
  2060. * Update the record referred to by cur, to the value given by [bno, len].
  2061. * This either works (return 0) or gets an EFSCORRUPTED error.
  2062. */
  2063. int /* error */
  2064. xfs_alloc_update(
  2065. xfs_btree_cur_t *cur, /* btree cursor */
  2066. xfs_agblock_t bno, /* starting block of extent */
  2067. xfs_extlen_t len) /* length of extent */
  2068. {
  2069. xfs_alloc_block_t *block; /* btree block to update */
  2070. int error; /* error return value */
  2071. int ptr; /* current record number (updating) */
  2072. ASSERT(len > 0);
  2073. /*
  2074. * Pick up the a.g. freelist struct and the current block.
  2075. */
  2076. block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]);
  2077. #ifdef DEBUG
  2078. if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0])))
  2079. return error;
  2080. #endif
  2081. /*
  2082. * Get the address of the rec to be updated.
  2083. */
  2084. ptr = cur->bc_ptrs[0];
  2085. {
  2086. xfs_alloc_rec_t *rp; /* pointer to updated record */
  2087. rp = XFS_ALLOC_REC_ADDR(block, ptr, cur);
  2088. /*
  2089. * Fill in the new contents and log them.
  2090. */
  2091. rp->ar_startblock = cpu_to_be32(bno);
  2092. rp->ar_blockcount = cpu_to_be32(len);
  2093. xfs_alloc_log_recs(cur, cur->bc_bufs[0], ptr, ptr);
  2094. }
  2095. /*
  2096. * If it's the by-size btree and it's the last leaf block and
  2097. * it's the last record... then update the size of the longest
  2098. * extent in the a.g., which we cache in the a.g. freelist header.
  2099. */
  2100. if (cur->bc_btnum == XFS_BTNUM_CNT &&
  2101. be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
  2102. ptr == be16_to_cpu(block->bb_numrecs)) {
  2103. xfs_agf_t *agf; /* a.g. freespace header */
  2104. xfs_agnumber_t seqno;
  2105. agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
  2106. seqno = be32_to_cpu(agf->agf_seqno);
  2107. cur->bc_mp->m_perag[seqno].pagf_longest = len;
  2108. agf->agf_longest = cpu_to_be32(len);
  2109. xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
  2110. XFS_AGF_LONGEST);
  2111. }
  2112. /*
  2113. * Updating first record in leaf. Pass new key value up to our parent.
  2114. */
  2115. if (ptr == 1) {
  2116. xfs_alloc_key_t key; /* key containing [bno, len] */
  2117. key.ar_startblock = cpu_to_be32(bno);
  2118. key.ar_blockcount = cpu_to_be32(len);
  2119. if ((error = xfs_alloc_updkey(cur, &key, 1)))
  2120. return error;
  2121. }
  2122. return 0;
  2123. }