xfs_da_btree.c

  1. /*
  2. * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include "xfs.h"
  19. #include "xfs_fs.h"
  20. #include "xfs_types.h"
  21. #include "xfs_bit.h"
  22. #include "xfs_log.h"
  23. #include "xfs_inum.h"
  24. #include "xfs_trans.h"
  25. #include "xfs_sb.h"
  26. #include "xfs_ag.h"
  27. #include "xfs_mount.h"
  28. #include "xfs_da_btree.h"
  29. #include "xfs_bmap_btree.h"
  30. #include "xfs_dir2.h"
  31. #include "xfs_dir2_format.h"
  32. #include "xfs_dir2_priv.h"
  33. #include "xfs_dinode.h"
  34. #include "xfs_inode.h"
  35. #include "xfs_inode_item.h"
  36. #include "xfs_alloc.h"
  37. #include "xfs_bmap.h"
  38. #include "xfs_attr.h"
  39. #include "xfs_attr_leaf.h"
  40. #include "xfs_error.h"
  41. #include "xfs_trace.h"
  42. /*
  43. * xfs_da_btree.c
  44. *
  45. * Routines to implement directories as Btrees of hashed names.
  46. */
  47. /*========================================================================
  48. * Function prototypes for the kernel.
  49. *========================================================================*/
  50. /*
  51. * Routines used for growing the Btree.
  52. */
  53. STATIC int xfs_da_root_split(xfs_da_state_t *state,
  54. xfs_da_state_blk_t *existing_root,
  55. xfs_da_state_blk_t *new_child);
  56. STATIC int xfs_da_node_split(xfs_da_state_t *state,
  57. xfs_da_state_blk_t *existing_blk,
  58. xfs_da_state_blk_t *split_blk,
  59. xfs_da_state_blk_t *blk_to_add,
  60. int treelevel,
  61. int *result);
  62. STATIC void xfs_da_node_rebalance(xfs_da_state_t *state,
  63. xfs_da_state_blk_t *node_blk_1,
  64. xfs_da_state_blk_t *node_blk_2);
  65. STATIC void xfs_da_node_add(xfs_da_state_t *state,
  66. xfs_da_state_blk_t *old_node_blk,
  67. xfs_da_state_blk_t *new_node_blk);
  68. /*
  69. * Routines used for shrinking the Btree.
  70. */
  71. STATIC int xfs_da_root_join(xfs_da_state_t *state,
  72. xfs_da_state_blk_t *root_blk);
  73. STATIC int xfs_da_node_toosmall(xfs_da_state_t *state, int *retval);
  74. STATIC void xfs_da_node_remove(xfs_da_state_t *state,
  75. xfs_da_state_blk_t *drop_blk);
  76. STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
  77. xfs_da_state_blk_t *src_node_blk,
  78. xfs_da_state_blk_t *dst_node_blk);
  79. /*
  80. * Utility routines.
  81. */
  82. STATIC uint xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count);
  83. STATIC int xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp);
  84. STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps);
  85. STATIC int xfs_da_blk_unlink(xfs_da_state_t *state,
  86. xfs_da_state_blk_t *drop_blk,
  87. xfs_da_state_blk_t *save_blk);
  88. STATIC void xfs_da_state_kill_altpath(xfs_da_state_t *state);
  89. /*========================================================================
  90. * Routines used for growing the Btree.
  91. *========================================================================*/
  92. /*
  93. * Create the initial contents of an intermediate node.
  94. */
  95. int
  96. xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
  97. xfs_dabuf_t **bpp, int whichfork)
  98. {
  99. xfs_da_intnode_t *node;
  100. xfs_dabuf_t *bp;
  101. int error;
  102. xfs_trans_t *tp;
  103. tp = args->trans;
  104. error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
  105. if (error)
  106. return(error);
  107. ASSERT(bp != NULL);
  108. node = bp->data;
  109. node->hdr.info.forw = 0;
  110. node->hdr.info.back = 0;
  111. node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
  112. node->hdr.info.pad = 0;
  113. node->hdr.count = 0;
  114. node->hdr.level = cpu_to_be16(level);
  115. xfs_da_log_buf(tp, bp,
  116. XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
  117. *bpp = bp;
  118. return(0);
  119. }
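For reference, the on-disk layout that xfs_da_node_create() fills in above is declared in xfs_da_btree.h. The sketch below is a paraphrase of those declarations (the field comments are mine and approximate); it is included only to make the hdr/btree[] accesses throughout this listing easier to follow.

typedef struct xfs_da_blkinfo {
	__be32	forw;			/* sibling links for the doubly linked chain */
	__be32	back;
	__be16	magic;			/* XFS_DA_NODE_MAGIC for intermediate nodes */
	__be16	pad;			/* unused, zeroed above */
} xfs_da_blkinfo_t;

typedef struct xfs_da_intnode {
	struct xfs_da_node_hdr {
		xfs_da_blkinfo_t info;	/* links, magic, pad */
		__be16	count;		/* active entries in btree[] */
		__be16	level;		/* height above the leaves (leaf == 0) */
	} hdr;
	struct xfs_da_node_entry {
		__be32	hashval;	/* last (largest) hashval in the child */
		__be32	before;		/* block number of the child */
	} btree[1];			/* variable sized array of entries */
} xfs_da_intnode_t;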
  120. /*
  121. * Split a leaf node, rebalance, then possibly split
  122. * intermediate nodes, rebalance, etc.
  123. */
  124. int /* error */
  125. xfs_da_split(xfs_da_state_t *state)
  126. {
  127. xfs_da_state_blk_t *oldblk, *newblk, *addblk;
  128. xfs_da_intnode_t *node;
  129. xfs_dabuf_t *bp;
  130. int max, action, error, i;
  131. /*
  132. * Walk back up the tree splitting/inserting/adjusting as necessary.
  133. * If we need to insert and there isn't room, split the node, then
  134. * decide which fragment to insert the new block from below into.
  135. * Note that we may split the root this way, but we need more fixup.
  136. */
  137. max = state->path.active - 1;
  138. ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
  139. ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
  140. state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
  141. addblk = &state->path.blk[max]; /* initial dummy value */
  142. for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
  143. oldblk = &state->path.blk[i];
  144. newblk = &state->altpath.blk[i];
  145. /*
  146. * If a leaf node then
  147. * Allocate a new leaf node, then rebalance across them.
  148. * else if an intermediate node then
  149. * We split on the last layer, must we split the node?
  150. */
  151. switch (oldblk->magic) {
  152. case XFS_ATTR_LEAF_MAGIC:
  153. error = xfs_attr_leaf_split(state, oldblk, newblk);
  154. if ((error != 0) && (error != ENOSPC)) {
  155. return(error); /* GROT: attr is inconsistent */
  156. }
  157. if (!error) {
  158. addblk = newblk;
  159. break;
  160. }
  161. /*
  162. * Entry wouldn't fit, split the leaf again.
  163. */
  164. state->extravalid = 1;
  165. if (state->inleaf) {
  166. state->extraafter = 0; /* before newblk */
  167. error = xfs_attr_leaf_split(state, oldblk,
  168. &state->extrablk);
  169. } else {
  170. state->extraafter = 1; /* after newblk */
  171. error = xfs_attr_leaf_split(state, newblk,
  172. &state->extrablk);
  173. }
  174. if (error)
  175. return(error); /* GROT: attr inconsistent */
  176. addblk = newblk;
  177. break;
  178. case XFS_DIR2_LEAFN_MAGIC:
  179. error = xfs_dir2_leafn_split(state, oldblk, newblk);
  180. if (error)
  181. return error;
  182. addblk = newblk;
  183. break;
  184. case XFS_DA_NODE_MAGIC:
  185. error = xfs_da_node_split(state, oldblk, newblk, addblk,
  186. max - i, &action);
  187. xfs_da_buf_done(addblk->bp);
  188. addblk->bp = NULL;
  189. if (error)
  190. return(error); /* GROT: dir is inconsistent */
  191. /*
  192. * Record the newly split block for the next time thru?
  193. */
  194. if (action)
  195. addblk = newblk;
  196. else
  197. addblk = NULL;
  198. break;
  199. }
  200. /*
  201. * Update the btree to show the new hashval for this child.
  202. */
  203. xfs_da_fixhashpath(state, &state->path);
  204. /*
  205. * If we won't need this block again, it's getting dropped
  206. * from the active path by the loop control, so we need
  207. * to mark it done now.
  208. */
  209. if (i > 0 || !addblk)
  210. xfs_da_buf_done(oldblk->bp);
  211. }
  212. if (!addblk)
  213. return(0);
  214. /*
  215. * Split the root node.
  216. */
  217. ASSERT(state->path.active == 0);
  218. oldblk = &state->path.blk[0];
  219. error = xfs_da_root_split(state, oldblk, addblk);
  220. if (error) {
  221. xfs_da_buf_done(oldblk->bp);
  222. xfs_da_buf_done(addblk->bp);
  223. addblk->bp = NULL;
  224. return(error); /* GROT: dir is inconsistent */
  225. }
  226. /*
  227. * Update pointers to the node which used to be block 0 and
  228. * just got bumped because of the addition of a new root node.
  229. * There might be three blocks involved if a double split occurred,
  230. * and the original block 0 could be at any position in the list.
  231. */
  232. node = oldblk->bp->data;
  233. if (node->hdr.info.forw) {
  234. if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
  235. bp = addblk->bp;
  236. } else {
  237. ASSERT(state->extravalid);
  238. bp = state->extrablk.bp;
  239. }
  240. node = bp->data;
  241. node->hdr.info.back = cpu_to_be32(oldblk->blkno);
  242. xfs_da_log_buf(state->args->trans, bp,
  243. XFS_DA_LOGRANGE(node, &node->hdr.info,
  244. sizeof(node->hdr.info)));
  245. }
  246. node = oldblk->bp->data;
  247. if (node->hdr.info.back) {
  248. if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
  249. bp = addblk->bp;
  250. } else {
  251. ASSERT(state->extravalid);
  252. bp = state->extrablk.bp;
  253. }
  254. node = bp->data;
  255. node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
  256. xfs_da_log_buf(state->args->trans, bp,
  257. XFS_DA_LOGRANGE(node, &node->hdr.info,
  258. sizeof(node->hdr.info)));
  259. }
  260. xfs_da_buf_done(oldblk->bp);
  261. xfs_da_buf_done(addblk->bp);
  262. addblk->bp = NULL;
  263. return(0);
  264. }
  265. /*
  266. * Split the root. We have to create a new root and point to the two
  267. * parts (the split old root) that we just created. Copy block zero to
  268. * the EOF, extending the inode in process.
  269. */
  270. STATIC int /* error */
  271. xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
  272. xfs_da_state_blk_t *blk2)
  273. {
  274. xfs_da_intnode_t *node, *oldroot;
  275. xfs_da_args_t *args;
  276. xfs_dablk_t blkno;
  277. xfs_dabuf_t *bp;
  278. int error, size;
  279. xfs_inode_t *dp;
  280. xfs_trans_t *tp;
  281. xfs_mount_t *mp;
  282. xfs_dir2_leaf_t *leaf;
  283. /*
  284. * Copy the existing (incorrect) block from the root node position
  285. * to a free space somewhere.
  286. */
  287. args = state->args;
  288. ASSERT(args != NULL);
  289. error = xfs_da_grow_inode(args, &blkno);
  290. if (error)
  291. return(error);
  292. dp = args->dp;
  293. tp = args->trans;
  294. mp = state->mp;
  295. error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
  296. if (error)
  297. return(error);
  298. ASSERT(bp != NULL);
  299. node = bp->data;
  300. oldroot = blk1->bp->data;
  301. if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
  302. size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
  303. (char *)oldroot);
  304. } else {
  305. ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
  306. leaf = (xfs_dir2_leaf_t *)oldroot;
  307. size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
  308. (char *)leaf);
  309. }
  310. memcpy(node, oldroot, size);
  311. xfs_da_log_buf(tp, bp, 0, size - 1);
  312. xfs_da_buf_done(blk1->bp);
  313. blk1->bp = bp;
  314. blk1->blkno = blkno;
  315. /*
  316. * Set up the new root node.
  317. */
  318. error = xfs_da_node_create(args,
  319. (args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
  320. be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
  321. if (error)
  322. return(error);
  323. node = bp->data;
  324. node->btree[0].hashval = cpu_to_be32(blk1->hashval);
  325. node->btree[0].before = cpu_to_be32(blk1->blkno);
  326. node->btree[1].hashval = cpu_to_be32(blk2->hashval);
  327. node->btree[1].before = cpu_to_be32(blk2->blkno);
  328. node->hdr.count = cpu_to_be16(2);
  329. #ifdef DEBUG
  330. if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
  331. ASSERT(blk1->blkno >= mp->m_dirleafblk &&
  332. blk1->blkno < mp->m_dirfreeblk);
  333. ASSERT(blk2->blkno >= mp->m_dirleafblk &&
  334. blk2->blkno < mp->m_dirfreeblk);
  335. }
  336. #endif
  337. /* Header is already logged by xfs_da_node_create */
  338. xfs_da_log_buf(tp, bp,
  339. XFS_DA_LOGRANGE(node, node->btree,
  340. sizeof(xfs_da_node_entry_t) * 2));
  341. xfs_da_buf_done(bp);
  342. return(0);
  343. }
  344. /*
  345. * Split the node, rebalance, then add the new entry.
  346. */
  347. STATIC int /* error */
  348. xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
  349. xfs_da_state_blk_t *newblk,
  350. xfs_da_state_blk_t *addblk,
  351. int treelevel, int *result)
  352. {
  353. xfs_da_intnode_t *node;
  354. xfs_dablk_t blkno;
  355. int newcount, error;
  356. int useextra;
  357. node = oldblk->bp->data;
  358. ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
  359. /*
  360. * With V2 dirs the extra block is data or freespace.
  361. */
  362. useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
  363. newcount = 1 + useextra;
  364. /*
  365. * Do we have to split the node?
  366. */
  367. if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) {
  368. /*
  369. * Allocate a new node, add to the doubly linked chain of
  370. * nodes, then move some of our excess entries into it.
  371. */
  372. error = xfs_da_grow_inode(state->args, &blkno);
  373. if (error)
  374. return(error); /* GROT: dir is inconsistent */
  375. error = xfs_da_node_create(state->args, blkno, treelevel,
  376. &newblk->bp, state->args->whichfork);
  377. if (error)
  378. return(error); /* GROT: dir is inconsistent */
  379. newblk->blkno = blkno;
  380. newblk->magic = XFS_DA_NODE_MAGIC;
  381. xfs_da_node_rebalance(state, oldblk, newblk);
  382. error = xfs_da_blk_link(state, oldblk, newblk);
  383. if (error)
  384. return(error);
  385. *result = 1;
  386. } else {
  387. *result = 0;
  388. }
  389. /*
  390. * Insert the new entry(s) into the correct block
  391. * (updating last hashval in the process).
  392. *
  393. * xfs_da_node_add() inserts BEFORE the given index,
  394. * and as a result of using node_lookup_int() we always
  395. * point to a valid entry (not after one), but a split
  396. * operation always results in a new block whose hashvals
  397. * FOLLOW the current block.
  398. *
  399. * If we had double-split op below us, then add the extra block too.
  400. */
  401. node = oldblk->bp->data;
  402. if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
  403. oldblk->index++;
  404. xfs_da_node_add(state, oldblk, addblk);
  405. if (useextra) {
  406. if (state->extraafter)
  407. oldblk->index++;
  408. xfs_da_node_add(state, oldblk, &state->extrablk);
  409. state->extravalid = 0;
  410. }
  411. } else {
  412. newblk->index++;
  413. xfs_da_node_add(state, newblk, addblk);
  414. if (useextra) {
  415. if (state->extraafter)
  416. newblk->index++;
  417. xfs_da_node_add(state, newblk, &state->extrablk);
  418. state->extravalid = 0;
  419. }
  420. }
  421. return(0);
  422. }
  423. /*
  424. * Balance the btree elements between two intermediate nodes,
  425. * usually one full and one empty.
  426. *
  427. * NOTE: if blk2 is empty, then it will get the upper half of blk1.
  428. */
  429. STATIC void
  430. xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
  431. xfs_da_state_blk_t *blk2)
  432. {
  433. xfs_da_intnode_t *node1, *node2, *tmpnode;
  434. xfs_da_node_entry_t *btree_s, *btree_d;
  435. int count, tmp;
  436. xfs_trans_t *tp;
  437. node1 = blk1->bp->data;
  438. node2 = blk2->bp->data;
  439. /*
  440. * Figure out how many entries need to move, and in which direction.
  441. * Swap the nodes around if that makes it simpler.
  442. */
  443. if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
  444. ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||
  445. (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
  446. be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
  447. tmpnode = node1;
  448. node1 = node2;
  449. node2 = tmpnode;
  450. }
  451. ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
  452. ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
  453. count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
  454. if (count == 0)
  455. return;
  456. tp = state->args->trans;
  457. /*
  458. * Two cases: high-to-low and low-to-high.
  459. */
  460. if (count > 0) {
  461. /*
  462. * Move elements in node2 up to make a hole.
  463. */
  464. if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) {
  465. tmp *= (uint)sizeof(xfs_da_node_entry_t);
  466. btree_s = &node2->btree[0];
  467. btree_d = &node2->btree[count];
  468. memmove(btree_d, btree_s, tmp);
  469. }
  470. /*
  471. * Move the req'd B-tree elements from high in node1 to
  472. * low in node2.
  473. */
  474. be16_add_cpu(&node2->hdr.count, count);
  475. tmp = count * (uint)sizeof(xfs_da_node_entry_t);
  476. btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
  477. btree_d = &node2->btree[0];
  478. memcpy(btree_d, btree_s, tmp);
  479. be16_add_cpu(&node1->hdr.count, -count);
  480. } else {
  481. /*
  482. * Move the req'd B-tree elements from low in node2 to
  483. * high in node1.
  484. */
  485. count = -count;
  486. tmp = count * (uint)sizeof(xfs_da_node_entry_t);
  487. btree_s = &node2->btree[0];
  488. btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
  489. memcpy(btree_d, btree_s, tmp);
  490. be16_add_cpu(&node1->hdr.count, count);
  491. xfs_da_log_buf(tp, blk1->bp,
  492. XFS_DA_LOGRANGE(node1, btree_d, tmp));
  493. /*
  494. * Move elements in node2 down to fill the hole.
  495. */
  496. tmp = be16_to_cpu(node2->hdr.count) - count;
  497. tmp *= (uint)sizeof(xfs_da_node_entry_t);
  498. btree_s = &node2->btree[count];
  499. btree_d = &node2->btree[0];
  500. memmove(btree_d, btree_s, tmp);
  501. be16_add_cpu(&node2->hdr.count, -count);
  502. }
  503. /*
  504. * Log header of node 1 and all current bits of node 2.
  505. */
  506. xfs_da_log_buf(tp, blk1->bp,
  507. XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
  508. xfs_da_log_buf(tp, blk2->bp,
  509. XFS_DA_LOGRANGE(node2, &node2->hdr,
  510. sizeof(node2->hdr) +
  511. sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));
  512. /*
  513. * Record the last hashval from each block for upward propagation.
  514. * (note: don't use the swapped node pointers)
  515. */
  516. node1 = blk1->bp->data;
  517. node2 = blk2->bp->data;
  518. blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
  519. blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);
  520. /*
  521. * Adjust the expected index for insertion.
  522. */
  523. if (blk1->index >= be16_to_cpu(node1->hdr.count)) {
  524. blk2->index = blk1->index - be16_to_cpu(node1->hdr.count);
  525. blk1->index = be16_to_cpu(node1->hdr.count) + 1; /* make it invalid */
  526. }
  527. }
  528. /*
  529. * Add a new entry to an intermediate node.
  530. */
  531. STATIC void
  532. xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
  533. xfs_da_state_blk_t *newblk)
  534. {
  535. xfs_da_intnode_t *node;
  536. xfs_da_node_entry_t *btree;
  537. int tmp;
  538. node = oldblk->bp->data;
  539. ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
  540. ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
  541. ASSERT(newblk->blkno != 0);
  542. if (state->args->whichfork == XFS_DATA_FORK)
  543. ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
  544. newblk->blkno < state->mp->m_dirfreeblk);
  545. /*
  546. * We may need to make some room before we insert the new node.
  547. */
  548. tmp = 0;
  549. btree = &node->btree[ oldblk->index ];
  550. if (oldblk->index < be16_to_cpu(node->hdr.count)) {
  551. tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);
  552. memmove(btree + 1, btree, tmp);
  553. }
  554. btree->hashval = cpu_to_be32(newblk->hashval);
  555. btree->before = cpu_to_be32(newblk->blkno);
  556. xfs_da_log_buf(state->args->trans, oldblk->bp,
  557. XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
  558. be16_add_cpu(&node->hdr.count, 1);
  559. xfs_da_log_buf(state->args->trans, oldblk->bp,
  560. XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
  561. /*
  562. * Copy the last hash value from the oldblk to propagate upwards.
  563. */
  564. oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);
  565. }
  566. /*========================================================================
  567. * Routines used for shrinking the Btree.
  568. *========================================================================*/
  569. /*
  570. * Deallocate an empty leaf node, remove it from its parent,
  571. * possibly deallocating that block, etc...
  572. */
  573. int
  574. xfs_da_join(xfs_da_state_t *state)
  575. {
  576. xfs_da_state_blk_t *drop_blk, *save_blk;
  577. int action, error;
  578. action = 0;
  579. drop_blk = &state->path.blk[ state->path.active-1 ];
  580. save_blk = &state->altpath.blk[ state->path.active-1 ];
  581. ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
  582. ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
  583. drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
  584. /*
  585. * Walk back up the tree joining/deallocating as necessary.
  586. * When we stop dropping blocks, break out.
  587. */
  588. for ( ; state->path.active >= 2; drop_blk--, save_blk--,
  589. state->path.active--) {
  590. /*
  591. * See if we can combine the block with a neighbor.
  592. * (action == 0) => no options, just leave
  593. * (action == 1) => coalesce, then unlink
  594. * (action == 2) => block empty, unlink it
  595. */
  596. switch (drop_blk->magic) {
  597. case XFS_ATTR_LEAF_MAGIC:
  598. error = xfs_attr_leaf_toosmall(state, &action);
  599. if (error)
  600. return(error);
  601. if (action == 0)
  602. return(0);
  603. xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
  604. break;
  605. case XFS_DIR2_LEAFN_MAGIC:
  606. error = xfs_dir2_leafn_toosmall(state, &action);
  607. if (error)
  608. return error;
  609. if (action == 0)
  610. return 0;
  611. xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
  612. break;
  613. case XFS_DA_NODE_MAGIC:
  614. /*
  615. * Remove the offending node, fixup hashvals,
  616. * check for a toosmall neighbor.
  617. */
  618. xfs_da_node_remove(state, drop_blk);
  619. xfs_da_fixhashpath(state, &state->path);
  620. error = xfs_da_node_toosmall(state, &action);
  621. if (error)
  622. return(error);
  623. if (action == 0)
  624. return 0;
  625. xfs_da_node_unbalance(state, drop_blk, save_blk);
  626. break;
  627. }
  628. xfs_da_fixhashpath(state, &state->altpath);
  629. error = xfs_da_blk_unlink(state, drop_blk, save_blk);
  630. xfs_da_state_kill_altpath(state);
  631. if (error)
  632. return(error);
  633. error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
  634. drop_blk->bp);
  635. drop_blk->bp = NULL;
  636. if (error)
  637. return(error);
  638. }
  639. /*
  640. * We joined all the way to the top. If it turns out that
  641. * we only have one entry in the root, make the child block
  642. * the new root.
  643. */
  644. xfs_da_node_remove(state, drop_blk);
  645. xfs_da_fixhashpath(state, &state->path);
  646. error = xfs_da_root_join(state, &state->path.blk[0]);
  647. return(error);
  648. }
  649. #ifdef DEBUG
  650. static void
  651. xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
  652. {
  653. __be16 magic = blkinfo->magic;
  654. if (level == 1) {
  655. ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
  656. magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
  657. } else
  658. ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
  659. ASSERT(!blkinfo->forw);
  660. ASSERT(!blkinfo->back);
  661. }
  662. #else /* !DEBUG */
  663. #define xfs_da_blkinfo_onlychild_validate(blkinfo, level)
  664. #endif /* !DEBUG */
  665. /*
  666. * We have only one entry in the root. Copy the only remaining child of
  667. * the old root to block 0 as the new root node.
  668. */
  669. STATIC int
  670. xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
  671. {
  672. xfs_da_intnode_t *oldroot;
  673. xfs_da_args_t *args;
  674. xfs_dablk_t child;
  675. xfs_dabuf_t *bp;
  676. int error;
  677. args = state->args;
  678. ASSERT(args != NULL);
  679. ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
  680. oldroot = root_blk->bp->data;
  681. ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
  682. ASSERT(!oldroot->hdr.info.forw);
  683. ASSERT(!oldroot->hdr.info.back);
  684. /*
  685. * If the root has more than one child, then don't do anything.
  686. */
  687. if (be16_to_cpu(oldroot->hdr.count) > 1)
  688. return(0);
  689. /*
  690. * Read in the (only) child block, then copy those bytes into
  691. * the root block's buffer and free the original child block.
  692. */
  693. child = be32_to_cpu(oldroot->btree[0].before);
  694. ASSERT(child != 0);
  695. error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp,
  696. args->whichfork);
  697. if (error)
  698. return(error);
  699. ASSERT(bp != NULL);
  700. xfs_da_blkinfo_onlychild_validate(bp->data,
  701. be16_to_cpu(oldroot->hdr.level));
  702. memcpy(root_blk->bp->data, bp->data, state->blocksize);
  703. xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
  704. error = xfs_da_shrink_inode(args, child, bp);
  705. return(error);
  706. }
  707. /*
  708. * Check a node block and its neighbors to see if the block should be
  709. * collapsed into one or the other neighbor. Always keep the block
  710. * with the smaller block number.
  711. * If the current block is over 50% full, don't try to join it, return 0.
  712. * If the block is empty, fill in the state structure and return 2.
  713. * If it can be collapsed, fill in the state structure and return 1.
  714. * If nothing can be done, return 0.
  715. */
  716. STATIC int
  717. xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
  718. {
  719. xfs_da_intnode_t *node;
  720. xfs_da_state_blk_t *blk;
  721. xfs_da_blkinfo_t *info;
  722. int count, forward, error, retval, i;
  723. xfs_dablk_t blkno;
  724. xfs_dabuf_t *bp;
  725. /*
  726. * Check for the degenerate case of the block being over 50% full.
  727. * If so, it's not worth even looking to see if we might be able
  728. * to coalesce with a sibling.
  729. */
  730. blk = &state->path.blk[ state->path.active-1 ];
  731. info = blk->bp->data;
  732. ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
  733. node = (xfs_da_intnode_t *)info;
  734. count = be16_to_cpu(node->hdr.count);
  735. if (count > (state->node_ents >> 1)) {
  736. *action = 0; /* blk over 50%, don't try to join */
  737. return(0); /* blk over 50%, don't try to join */
  738. }
  739. /*
  740. * Check for the degenerate case of the block being empty.
  741. * If the block is empty, we'll simply delete it, no need to
  742. * coalesce it with a sibling block. We choose (arbitrarily)
  743. * to merge with the forward block unless it is NULL.
  744. */
  745. if (count == 0) {
  746. /*
  747. * Make altpath point to the block we want to keep and
  748. * path point to the block we want to drop (this one).
  749. */
  750. forward = (info->forw != 0);
  751. memcpy(&state->altpath, &state->path, sizeof(state->path));
  752. error = xfs_da_path_shift(state, &state->altpath, forward,
  753. 0, &retval);
  754. if (error)
  755. return(error);
  756. if (retval) {
  757. *action = 0;
  758. } else {
  759. *action = 2;
  760. }
  761. return(0);
  762. }
  763. /*
  764. * Examine each sibling block to see if we can coalesce with
  765. * at least 25% free space to spare. We need to figure out
  766. * whether to merge with the forward or the backward block.
  767. * We prefer coalescing with the lower numbered sibling so as
  768. * to shrink a directory over time.
  769. */
  770. /* start with smaller blk num */
  771. forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
  772. for (i = 0; i < 2; forward = !forward, i++) {
  773. if (forward)
  774. blkno = be32_to_cpu(info->forw);
  775. else
  776. blkno = be32_to_cpu(info->back);
  777. if (blkno == 0)
  778. continue;
  779. error = xfs_da_read_buf(state->args->trans, state->args->dp,
  780. blkno, -1, &bp, state->args->whichfork);
  781. if (error)
  782. return(error);
  783. ASSERT(bp != NULL);
  784. node = (xfs_da_intnode_t *)info;
  785. count = state->node_ents;
  786. count -= state->node_ents >> 2;
  787. count -= be16_to_cpu(node->hdr.count);
  788. node = bp->data;
  789. ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
  790. count -= be16_to_cpu(node->hdr.count);
  791. xfs_da_brelse(state->args->trans, bp);
  792. if (count >= 0)
  793. break; /* fits with at least 25% to spare */
  794. }
  795. if (i >= 2) {
  796. *action = 0;
  797. return(0);
  798. }
  799. /*
  800. * Make altpath point to the block we want to keep (the lower
  801. * numbered block) and path point to the block we want to drop.
  802. */
  803. memcpy(&state->altpath, &state->path, sizeof(state->path));
  804. if (blkno < blk->blkno) {
  805. error = xfs_da_path_shift(state, &state->altpath, forward,
  806. 0, &retval);
  807. if (error) {
  808. return(error);
  809. }
  810. if (retval) {
  811. *action = 0;
  812. return(0);
  813. }
  814. } else {
  815. error = xfs_da_path_shift(state, &state->path, forward,
  816. 0, &retval);
  817. if (error) {
  818. return(error);
  819. }
  820. if (retval) {
  821. *action = 0;
  822. return(0);
  823. }
  824. }
  825. *action = 1;
  826. return(0);
  827. }
  828. /*
  829. * Walk back up the tree adjusting hash values as necessary,
  830. * when we stop making changes, return.
  831. */
  832. void
  833. xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
  834. {
  835. xfs_da_state_blk_t *blk;
  836. xfs_da_intnode_t *node;
  837. xfs_da_node_entry_t *btree;
  838. xfs_dahash_t lasthash=0;
  839. int level, count;
  840. level = path->active-1;
  841. blk = &path->blk[ level ];
  842. switch (blk->magic) {
  843. case XFS_ATTR_LEAF_MAGIC:
  844. lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
  845. if (count == 0)
  846. return;
  847. break;
  848. case XFS_DIR2_LEAFN_MAGIC:
  849. lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
  850. if (count == 0)
  851. return;
  852. break;
  853. case XFS_DA_NODE_MAGIC:
  854. lasthash = xfs_da_node_lasthash(blk->bp, &count);
  855. if (count == 0)
  856. return;
  857. break;
  858. }
  859. for (blk--, level--; level >= 0; blk--, level--) {
  860. node = blk->bp->data;
  861. ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
  862. btree = &node->btree[ blk->index ];
  863. if (be32_to_cpu(btree->hashval) == lasthash)
  864. break;
  865. blk->hashval = lasthash;
  866. btree->hashval = cpu_to_be32(lasthash);
  867. xfs_da_log_buf(state->args->trans, blk->bp,
  868. XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
  869. lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
  870. }
  871. }
  872. /*
  873. * Remove an entry from an intermediate node.
  874. */
  875. STATIC void
  876. xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
  877. {
  878. xfs_da_intnode_t *node;
  879. xfs_da_node_entry_t *btree;
  880. int tmp;
  881. node = drop_blk->bp->data;
  882. ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
  883. ASSERT(drop_blk->index >= 0);
  884. /*
  885. * Copy over the offending entry, or just zero it out.
  886. */
  887. btree = &node->btree[drop_blk->index];
  888. if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) {
  889. tmp = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
  890. tmp *= (uint)sizeof(xfs_da_node_entry_t);
  891. memmove(btree, btree + 1, tmp);
  892. xfs_da_log_buf(state->args->trans, drop_blk->bp,
  893. XFS_DA_LOGRANGE(node, btree, tmp));
  894. btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
  895. }
  896. memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
  897. xfs_da_log_buf(state->args->trans, drop_blk->bp,
  898. XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
  899. be16_add_cpu(&node->hdr.count, -1);
  900. xfs_da_log_buf(state->args->trans, drop_blk->bp,
  901. XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
  902. /*
  903. * Copy the last hash value from the block to propagate upwards.
  904. */
  905. btree--;
  906. drop_blk->hashval = be32_to_cpu(btree->hashval);
  907. }
  908. /*
  909. * Unbalance the btree elements between two intermediate nodes,
  910. * move all Btree elements from one node into another.
  911. */
  912. STATIC void
  913. xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
  914. xfs_da_state_blk_t *save_blk)
  915. {
  916. xfs_da_intnode_t *drop_node, *save_node;
  917. xfs_da_node_entry_t *btree;
  918. int tmp;
  919. xfs_trans_t *tp;
  920. drop_node = drop_blk->bp->data;
  921. save_node = save_blk->bp->data;
  922. ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
  923. ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
  924. tp = state->args->trans;
  925. /*
  926. * If the dying block has lower hashvals, then move all the
  927. * elements in the remaining block up to make a hole.
  928. */
  929. if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) ||
  930. (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) <
  931. be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval)))
  932. {
  933. btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)];
  934. tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
  935. memmove(btree, &save_node->btree[0], tmp);
  936. btree = &save_node->btree[0];
  937. xfs_da_log_buf(tp, save_blk->bp,
  938. XFS_DA_LOGRANGE(save_node, btree,
  939. (be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
  940. sizeof(xfs_da_node_entry_t)));
  941. } else {
  942. btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
  943. xfs_da_log_buf(tp, save_blk->bp,
  944. XFS_DA_LOGRANGE(save_node, btree,
  945. be16_to_cpu(drop_node->hdr.count) *
  946. sizeof(xfs_da_node_entry_t)));
  947. }
  948. /*
  949. * Move all the B-tree elements from drop_blk to save_blk.
  950. */
  951. tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
  952. memcpy(btree, &drop_node->btree[0], tmp);
  953. be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));
  954. xfs_da_log_buf(tp, save_blk->bp,
  955. XFS_DA_LOGRANGE(save_node, &save_node->hdr,
  956. sizeof(save_node->hdr)));
  957. /*
  958. * Save the last hashval in the remaining block for upward propagation.
  959. */
  960. save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval);
  961. }
  962. /*========================================================================
  963. * Routines used for finding things in the Btree.
  964. *========================================================================*/
  965. /*
  966. * Walk down the Btree looking for a particular filename, filling
  967. * in the state structure as we go.
  968. *
  969. * We will set the state structure to point to each of the elements
  970. * in each of the nodes where either the hashval is or should be.
  971. *
  972. * We support duplicate hashval's so for each entry in the current
  973. * node that could contain the desired hashval, descend. This is a
  974. * pruned depth-first tree search.
  975. */
  976. int /* error */
  977. xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
  978. {
  979. xfs_da_state_blk_t *blk;
  980. xfs_da_blkinfo_t *curr;
  981. xfs_da_intnode_t *node;
  982. xfs_da_node_entry_t *btree;
  983. xfs_dablk_t blkno;
  984. int probe, span, max, error, retval;
  985. xfs_dahash_t hashval, btreehashval;
  986. xfs_da_args_t *args;
  987. args = state->args;
  988. /*
  989. * Descend thru the B-tree searching each level for the right
  990. * node to use, until the right hashval is found.
  991. */
  992. blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
  993. for (blk = &state->path.blk[0], state->path.active = 1;
  994. state->path.active <= XFS_DA_NODE_MAXDEPTH;
  995. blk++, state->path.active++) {
  996. /*
  997. * Read the next node down in the tree.
  998. */
  999. blk->blkno = blkno;
  1000. error = xfs_da_read_buf(args->trans, args->dp, blkno,
  1001. -1, &blk->bp, args->whichfork);
  1002. if (error) {
  1003. blk->blkno = 0;
  1004. state->path.active--;
  1005. return(error);
  1006. }
  1007. curr = blk->bp->data;
  1008. blk->magic = be16_to_cpu(curr->magic);
  1009. ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
  1010. blk->magic == XFS_DIR2_LEAFN_MAGIC ||
  1011. blk->magic == XFS_ATTR_LEAF_MAGIC);
  1012. /*
  1013. * Search an intermediate node for a match.
  1014. */
  1015. if (blk->magic == XFS_DA_NODE_MAGIC) {
  1016. node = blk->bp->data;
  1017. max = be16_to_cpu(node->hdr.count);
  1018. blk->hashval = be32_to_cpu(node->btree[max-1].hashval);
  1019. /*
  1020. * Binary search. (note: small blocks will skip loop)
  1021. */
  1022. probe = span = max / 2;
  1023. hashval = args->hashval;
  1024. for (btree = &node->btree[probe]; span > 4;
  1025. btree = &node->btree[probe]) {
  1026. span /= 2;
  1027. btreehashval = be32_to_cpu(btree->hashval);
  1028. if (btreehashval < hashval)
  1029. probe += span;
  1030. else if (btreehashval > hashval)
  1031. probe -= span;
  1032. else
  1033. break;
  1034. }
  1035. ASSERT((probe >= 0) && (probe < max));
  1036. ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval));
  1037. /*
  1038. * Since we may have duplicate hashval's, find the first
  1039. * matching hashval in the node.
  1040. */
  1041. while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) {
  1042. btree--;
  1043. probe--;
  1044. }
  1045. while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) {
  1046. btree++;
  1047. probe++;
  1048. }
  1049. /*
  1050. * Pick the right block to descend on.
  1051. */
  1052. if (probe == max) {
  1053. blk->index = max-1;
  1054. blkno = be32_to_cpu(node->btree[max-1].before);
  1055. } else {
  1056. blk->index = probe;
  1057. blkno = be32_to_cpu(btree->before);
  1058. }
  1059. } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
  1060. blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
  1061. break;
  1062. } else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
  1063. blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
  1064. break;
  1065. }
  1066. }
  1067. /*
  1068. * A leaf block that ends in the hashval that we are interested in
  1069. * (final hashval == search hashval) means that the next block may
  1070. * contain more entries with the same hashval, shift upward to the
  1071. * next leaf and keep searching.
  1072. */
  1073. for (;;) {
  1074. if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
  1075. retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
  1076. &blk->index, state);
  1077. } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
  1078. retval = xfs_attr_leaf_lookup_int(blk->bp, args);
  1079. blk->index = args->index;
  1080. args->blkno = blk->blkno;
  1081. } else {
  1082. ASSERT(0);
  1083. return XFS_ERROR(EFSCORRUPTED);
  1084. }
  1085. if (((retval == ENOENT) || (retval == ENOATTR)) &&
  1086. (blk->hashval == args->hashval)) {
  1087. error = xfs_da_path_shift(state, &state->path, 1, 1,
  1088. &retval);
  1089. if (error)
  1090. return(error);
  1091. if (retval == 0) {
  1092. continue;
  1093. } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
  1094. /* path_shift() gives ENOENT */
  1095. retval = XFS_ERROR(ENOATTR);
  1096. }
  1097. }
  1098. break;
  1099. }
  1100. *result = retval;
  1101. return(0);
  1102. }
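The probe/span search in xfs_da_node_lookup_int() above is a coarse binary search followed by a short linear walk to the first entry with a matching hashval (hashvals may be duplicated). Below is a stand-alone sketch of the same idea applied to a plain sorted array; the names (first_ge_probe, nents) are made up for illustration and are not part of xfs_da_btree.c.

#include <stdint.h>

/* Return the index of the first element >= hashval, or nents if none. */
static int
first_ge_probe(const uint32_t *hashvals, int nents, uint32_t hashval)
{
	int probe, span;

	/* Coarse binary search: stop once the window is small. */
	probe = span = nents / 2;
	while (span > 4) {
		span /= 2;
		if (hashvals[probe] < hashval)
			probe += span;
		else if (hashvals[probe] > hashval)
			probe -= span;
		else
			break;
	}
	/* Back up over duplicates, then forward past smaller entries. */
	while (probe > 0 && hashvals[probe] >= hashval)
		probe--;
	while (probe < nents && hashvals[probe] < hashval)
		probe++;
	return probe;
}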
  1103. /*========================================================================
  1104. * Utility routines.
  1105. *========================================================================*/
  1106. /*
  1107. * Link a new block into a doubly linked list of blocks (of whatever type).
  1108. */
  1109. int /* error */
  1110. xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
  1111. xfs_da_state_blk_t *new_blk)
  1112. {
  1113. xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
  1114. xfs_da_args_t *args;
  1115. int before=0, error;
  1116. xfs_dabuf_t *bp;
  1117. /*
  1118. * Set up environment.
  1119. */
  1120. args = state->args;
  1121. ASSERT(args != NULL);
  1122. old_info = old_blk->bp->data;
  1123. new_info = new_blk->bp->data;
  1124. ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
  1125. old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
  1126. old_blk->magic == XFS_ATTR_LEAF_MAGIC);
  1127. ASSERT(old_blk->magic == be16_to_cpu(old_info->magic));
  1128. ASSERT(new_blk->magic == be16_to_cpu(new_info->magic));
  1129. ASSERT(old_blk->magic == new_blk->magic);
  1130. switch (old_blk->magic) {
  1131. case XFS_ATTR_LEAF_MAGIC:
  1132. before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
  1133. break;
  1134. case XFS_DIR2_LEAFN_MAGIC:
  1135. before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
  1136. break;
  1137. case XFS_DA_NODE_MAGIC:
  1138. before = xfs_da_node_order(old_blk->bp, new_blk->bp);
  1139. break;
  1140. }
  1141. /*
  1142. * Link blocks in appropriate order.
  1143. */
  1144. if (before) {
  1145. /*
  1146. * Link new block in before existing block.
  1147. */
  1148. new_info->forw = cpu_to_be32(old_blk->blkno);
  1149. new_info->back = old_info->back;
  1150. if (old_info->back) {
  1151. error = xfs_da_read_buf(args->trans, args->dp,
  1152. be32_to_cpu(old_info->back),
  1153. -1, &bp, args->whichfork);
  1154. if (error)
  1155. return(error);
  1156. ASSERT(bp != NULL);
  1157. tmp_info = bp->data;
  1158. ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
  1159. ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
  1160. tmp_info->forw = cpu_to_be32(new_blk->blkno);
  1161. xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
  1162. xfs_da_buf_done(bp);
  1163. }
  1164. old_info->back = cpu_to_be32(new_blk->blkno);
  1165. } else {
  1166. /*
  1167. * Link new block in after existing block.
  1168. */
  1169. new_info->forw = old_info->forw;
  1170. new_info->back = cpu_to_be32(old_blk->blkno);
  1171. if (old_info->forw) {
  1172. error = xfs_da_read_buf(args->trans, args->dp,
  1173. be32_to_cpu(old_info->forw),
  1174. -1, &bp, args->whichfork);
  1175. if (error)
  1176. return(error);
  1177. ASSERT(bp != NULL);
  1178. tmp_info = bp->data;
  1179. ASSERT(tmp_info->magic == old_info->magic);
  1180. ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
  1181. tmp_info->back = cpu_to_be32(new_blk->blkno);
  1182. xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
  1183. xfs_da_buf_done(bp);
  1184. }
  1185. old_info->forw = cpu_to_be32(new_blk->blkno);
  1186. }
  1187. xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
  1188. xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
  1189. return(0);
  1190. }
  1191. /*
  1192. * Compare two intermediate nodes for "order".
  1193. */
  1194. STATIC int
  1195. xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
  1196. {
  1197. xfs_da_intnode_t *node1, *node2;
  1198. node1 = node1_bp->data;
  1199. node2 = node2_bp->data;
  1200. ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
  1201. node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
  1202. if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
  1203. ((be32_to_cpu(node2->btree[0].hashval) <
  1204. be32_to_cpu(node1->btree[0].hashval)) ||
  1205. (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
  1206. be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
  1207. return(1);
  1208. }
  1209. return(0);
  1210. }
  1211. /*
  1212. * Pick up the last hashvalue from an intermediate node.
  1213. */
  1214. STATIC uint
  1215. xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
  1216. {
  1217. xfs_da_intnode_t *node;
  1218. node = bp->data;
  1219. ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
  1220. if (count)
  1221. *count = be16_to_cpu(node->hdr.count);
  1222. if (!node->hdr.count)
  1223. return(0);
  1224. return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
  1225. }
  1226. /*
  1227. * Unlink a block from a doubly linked list of blocks.
  1228. */
  1229. STATIC int /* error */
  1230. xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
  1231. xfs_da_state_blk_t *save_blk)
  1232. {
  1233. xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
  1234. xfs_da_args_t *args;
  1235. xfs_dabuf_t *bp;
  1236. int error;
  1237. /*
  1238. * Set up environment.
  1239. */
  1240. args = state->args;
  1241. ASSERT(args != NULL);
  1242. save_info = save_blk->bp->data;
  1243. drop_info = drop_blk->bp->data;
  1244. ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
  1245. save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
  1246. save_blk->magic == XFS_ATTR_LEAF_MAGIC);
  1247. ASSERT(save_blk->magic == be16_to_cpu(save_info->magic));
  1248. ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic));
  1249. ASSERT(save_blk->magic == drop_blk->magic);
  1250. ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
  1251. (be32_to_cpu(save_info->back) == drop_blk->blkno));
  1252. ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
  1253. (be32_to_cpu(drop_info->back) == save_blk->blkno));
  1254. /*
  1255. * Unlink the leaf block from the doubly linked chain of leaves.
  1256. */
  1257. if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
  1258. save_info->back = drop_info->back;
  1259. if (drop_info->back) {
  1260. error = xfs_da_read_buf(args->trans, args->dp,
  1261. be32_to_cpu(drop_info->back),
  1262. -1, &bp, args->whichfork);
  1263. if (error)
  1264. return(error);
  1265. ASSERT(bp != NULL);
  1266. tmp_info = bp->data;
  1267. ASSERT(tmp_info->magic == save_info->magic);
  1268. ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
  1269. tmp_info->forw = cpu_to_be32(save_blk->blkno);
  1270. xfs_da_log_buf(args->trans, bp, 0,
  1271. sizeof(*tmp_info) - 1);
  1272. xfs_da_buf_done(bp);
  1273. }
  1274. } else {
  1275. save_info->forw = drop_info->forw;
  1276. if (drop_info->forw) {
  1277. error = xfs_da_read_buf(args->trans, args->dp,
  1278. be32_to_cpu(drop_info->forw),
  1279. -1, &bp, args->whichfork);
  1280. if (error)
  1281. return(error);
  1282. ASSERT(bp != NULL);
  1283. tmp_info = bp->data;
  1284. ASSERT(tmp_info->magic == save_info->magic);
  1285. ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
  1286. tmp_info->back = cpu_to_be32(save_blk->blkno);
  1287. xfs_da_log_buf(args->trans, bp, 0,
  1288. sizeof(*tmp_info) - 1);
  1289. xfs_da_buf_done(bp);
  1290. }
  1291. }
  1292. xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
  1293. return(0);
  1294. }
/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
int							/* error */
xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
				 int forward, int release, int *result)
{
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	xfs_da_intnode_t *node;
	xfs_da_args_t *args;
	xfs_dablk_t blkno=0;
	int level, error;

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active-1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		ASSERT(blk->bp != NULL);
		node = blk->bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
			blk->index++;
			blkno = be32_to_cpu(node->btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(node->btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = XFS_ERROR(ENOENT);	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return(0);
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Release the old block.
		 * (if it's dirty, trans won't actually let go)
		 */
		if (release)
			xfs_da_brelse(args->trans, blk->bp);

		/*
		 * Read the next child block.
		 */
		blk->blkno = blkno;
		error = xfs_da_read_buf(args->trans, args->dp, blkno, -1,
						     &blk->bp, args->whichfork);
		if (error)
			return(error);
		ASSERT(blk->bp != NULL);
		info = blk->bp->data;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
		blk->magic = be16_to_cpu(info->magic);
		if (blk->magic == XFS_DA_NODE_MAGIC) {
			node = (xfs_da_intnode_t *)info;
			blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = be16_to_cpu(node->hdr.count)-1;
			blkno = be32_to_cpu(node->btree[blk->index].before);
		} else {
			ASSERT(level == path->active-1);
			blk->index = 0;
			switch(blk->magic) {
			case XFS_ATTR_LEAF_MAGIC:
				blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
								      NULL);
				break;
			case XFS_DIR2_LEAFN_MAGIC:
				blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
								       NULL);
				break;
			default:
				ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC ||
				       blk->magic == XFS_DIR2_LEAFN_MAGIC);
				break;
			}
		}
	}
	*result = 0;
	return(0);
}


/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
	xfs_dahash_t hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}

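/*
 * Worked example (illustrative): for the two-character name "ab" the
 * four-at-a-time loop above is skipped (namelen < 4) and the "case 2"
 * arm returns
 *
 *	('a' << 7) ^ ('b' << 0) ^ rol32(0, 14)
 *	  = 0x3080 ^ 0x62 ^ 0
 *	  = 0x30e2
 *
 * so xfs_da_hashname((const __uint8_t *)"ab", 2) == 0x30e2.
 */
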
enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}

static xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}

const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};

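/*
 * Usage sketch (illustrative, not part of the original source): callers
 * normally reach these operations through the per-mount nameops vector
 * rather than calling them directly, roughly as below.  The helper name
 * is hypothetical; case-insensitive mounts install a different compname.
 */
#if 0
static int
example_entry_matches(
	struct xfs_da_args	*args,
	const unsigned char	*ondisk_name,
	int			ondisk_len)
{
	struct xfs_mount	*mp = args->dp->i_mount;
	struct xfs_name		lookup;

	/* hash of the name being looked up, computed once per operation */
	lookup.name = args->name;
	lookup.len = args->namelen;
	args->hashval = mp->m_dirnameops->hashname(&lookup);

	/* per-entry comparison against an on-disk name */
	return mp->m_dirnameops->compname(args, ondisk_name, ondisk_len) ==
		XFS_CMP_EXACT;
}
#endif
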
int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_drfsbno_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	ASSERT(args->firstblock != NULL);
	error = xfs_bmapi(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
			XFS_BMAPI_CONTIG,
			args->firstblock, args->total, &map, &nmap,
			args->flist);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t	b;
		int		c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|
					XFS_BMAPI_METADATA,
					args->firstblock, args->total,
					&mapp[mapi], &nmap, args->flist);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = XFS_ERROR(ENOSPC);
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}

/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			count;
	int			error;

	if (args->whichfork == XFS_DATA_FORK) {
		bno = args->dp->i_mount->m_dirleafblk;
		count = args->dp->i_mount->m_dirblkfsbs;
	} else {
		bno = 0;
		count = 1;
	}

	error = xfs_da_grow_inode_int(args, &bno, count);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}

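/*
 * Usage sketch (illustrative, not part of the original source): a caller
 * that needs a fresh da block typically grows the fork and then gets a
 * buffer for the new block before filling it in.  The helper name is
 * hypothetical and error handling is abbreviated.
 */
#if 0
static int
example_add_da_block(struct xfs_da_args *args, xfs_dabuf_t **bpp)
{
	xfs_dablk_t	blkno;
	int		error;

	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return error;
	error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, bpp,
			       args->whichfork);
	if (error)
		return error;
	/* ... initialize the new block via (*bpp)->data, then log it ... */
	return 0;
}
#endif
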
/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file, because
 * the last block of the file can always be removed without triggering
 * a bmap btree split.
 */
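/*
 * In outline the swap works as follows: copy the last block's contents
 * over the dead block and log it, repoint the left/right sibling blocks
 * (if any) at the dead block's address, walk down from the root to the
 * parent node entry that referenced the last block and repoint it too,
 * then hand the now-unreferenced last block back to the caller to unmap.
 */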
STATIC int
xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
		      xfs_dabuf_t **dead_bufp)
{
	xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
	xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf;
	xfs_fileoff_t lastoff;
	xfs_inode_t *ip;
	xfs_trans_t *tp;
	xfs_mount_t *mp;
	int error, w, entno, level, dead_level;
	xfs_da_blkinfo_t *dead_info, *sib_info;
	xfs_da_intnode_t *par_node, *dead_node;
	xfs_dir2_leaf_t *dead_leaf2;
	xfs_dahash_t dead_hash;

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	ip = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = ip->i_mount;
	lastoff = mp->m_dirfreeblk;
	error = xfs_bmap_last_before(tp, ip, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
	if ((error = xfs_da_read_buf(tp, ip, last_blkno, -1, &last_buf, w)))
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize);
	xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
	dead_info = dead_buf->data;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		dead_level = 0;
		dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
	} else {
		ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		dead_node = (xfs_da_intnode_t *)dead_info;
		dead_level = be16_to_cpu(dead_node->hdr.level);
		dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
			goto done;
		sib_info = sib_buf->data;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_da_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		xfs_da_buf_done(sib_buf);
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
			goto done;
		sib_info = sib_buf->data;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_da_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		xfs_da_buf_done(sib_buf);
		sib_buf = NULL;
	}
	par_blkno = mp->m_dirleafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
			goto done;
		par_node = par_buf->data;
		if (unlikely(par_node->hdr.info.magic !=
		    cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		    (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		level = be16_to_cpu(par_node->hdr.level);
		for (entno = 0;
		     entno < be16_to_cpu(par_node->hdr.count) &&
		     be32_to_cpu(par_node->btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		par_blkno = be32_to_cpu(par_node->btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_da_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < be16_to_cpu(par_node->hdr.count) &&
		     be32_to_cpu(par_node->btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < be16_to_cpu(par_node->hdr.count))
			break;
		par_blkno = be32_to_cpu(par_node->hdr.info.forw);
		xfs_da_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
			goto done;
		par_node = par_buf->data;
		if (unlikely(
		    be16_to_cpu(par_node->hdr.level) != level ||
		    par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		entno = 0;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	par_node->btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_da_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
				sizeof(par_node->btree[entno].before)));
	xfs_da_buf_done(par_buf);
	xfs_da_buf_done(dead_buf);
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_da_brelse(tp, par_buf);
	if (sib_buf)
		xfs_da_brelse(tp, sib_buf);
	xfs_da_brelse(tp, last_buf);
	return error;
}

/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
		    xfs_dabuf_t *dead_buf)
{
	xfs_inode_t *dp;
	int done, error, w, count;
	xfs_trans_t *tp;
	xfs_mount_t *mp;

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	mp = dp->i_mount;
	if (w == XFS_DATA_FORK)
		count = mp->m_dirblkfsbs;
	else
		count = 1;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
				xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
				0, args->firstblock, args->flist,
				&done)) == ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			if ((error = xfs_da_swap_lastblock(args, &dead_blkno,
					&dead_buf)))
				break;
		} else {
			break;
		}
	}
	xfs_da_binval(tp, dead_buf);
	return error;
}

/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	xfs_fileoff_t	off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}

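/*
 * For example: with bno == 8 and count == 4, a real extent at offset 8 for
 * 2 blocks followed by one at offset 10 for 2 blocks covers the range and
 * the check above returns 1, while a hole, a delayed-allocation extent, a
 * gap between extents, or a mapping that comes up short returns 0.
 */
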
/*
 * Make a dabuf.
 * Used for get_buf, read_buf, read_bufr, and reada_buf.
 */
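/*
 * The caller argument selects the behaviour: 0 gets buffers without
 * reading them (xfs_da_get_buf), 1 reads them and verifies the magic
 * numbers (xfs_da_read_buf), 2 reads them without that verification
 * (the old read_bufr interface noted above), and 3 only issues
 * readahead and returns no buffer (xfs_da_reada_buf).
 */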
STATIC int
xfs_da_do_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	xfs_daddr_t	*mappedbnop,
	xfs_dabuf_t	**bpp,
	int		whichfork,
	int		caller)
{
	xfs_buf_t	*bp = NULL;
	xfs_buf_t	**bplist;
	int		error=0;
	int		i;
	xfs_bmbt_irec_t	map;
	xfs_bmbt_irec_t	*mapp;
	xfs_daddr_t	mappedbno;
	xfs_mount_t	*mp;
	int		nbplist=0;
	int		nfsb;
	int		nmap;
	xfs_dabuf_t	*rbp;

	mp = dp->i_mount;
	nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
	mappedbno = *mappedbnop;
	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb == 1) {
			xfs_fsblock_t	fsb;

			if ((error =
			    xfs_bmapi_single(trans, dp, whichfork, &fsb,
				    (xfs_fileoff_t)bno))) {
				return error;
			}
			mapp = &map;
			if (fsb == NULLFSBLOCK) {
				nmap = 0;
			} else {
				map.br_startblock = fsb;
				map.br_startoff = (xfs_fileoff_t)bno;
				map.br_blockcount = 1;
				nmap = 1;
			}
		} else {
			mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);
			nmap = nfsb;
			if ((error = xfs_bmapi(trans, dp, (xfs_fileoff_t)bno,
					nfsb,
					XFS_BMAPI_METADATA |
						xfs_bmapi_aflag(whichfork),
					NULL, 0, mapp, &nmap, NULL)))
				goto exit0;
		}
	} else {
		map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		map.br_startoff = (xfs_fileoff_t)bno;
		map.br_blockcount = nfsb;
		mapp = &map;
		nmap = 1;
	}
	if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) {
		error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
		if (unlikely(error == EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < nmap; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)mapp[i].br_startoff,
						(long long)mapp[i].br_startblock,
						(long long)mapp[i].br_blockcount,
						mapp[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto exit0;
	}
	if (caller != 3 && nmap > 1) {
		bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP);
		nbplist = 0;
	} else
		bplist = NULL;
	/*
	 * Turn the mapping(s) into buffer(s).
	 */
	for (i = 0; i < nmap; i++) {
		int	nmapped;

		mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock);
		if (i == 0)
			*mappedbnop = mappedbno;
		nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount);
		switch (caller) {
		case 0:
			bp = xfs_trans_get_buf(trans, mp->m_ddev_targp,
				mappedbno, nmapped, 0);
			error = bp ? XFS_BUF_GETERROR(bp) : XFS_ERROR(EIO);
			break;
		case 1:
		case 2:
			bp = NULL;
			error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp,
				mappedbno, nmapped, 0, &bp);
			break;
		case 3:
			xfs_buf_readahead(mp->m_ddev_targp, mappedbno, nmapped);
			error = 0;
			bp = NULL;
			break;
		}
		if (error) {
			if (bp)
				xfs_trans_brelse(trans, bp);
			goto exit1;
		}
		if (!bp)
			continue;
		if (caller == 1) {
			if (whichfork == XFS_ATTR_FORK) {
				XFS_BUF_SET_VTYPE_REF(bp, B_FS_ATTR_BTREE,
						XFS_ATTR_BTREE_REF);
			} else {
				XFS_BUF_SET_VTYPE_REF(bp, B_FS_DIR_BTREE,
						XFS_DIR_BTREE_REF);
			}
		}
		if (bplist) {
			bplist[nbplist++] = bp;
		}
	}
	/*
	 * Build a dabuf structure.
	 */
	if (bplist) {
		rbp = xfs_da_buf_make(nbplist, bplist);
	} else if (bp)
		rbp = xfs_da_buf_make(1, &bp);
	else
		rbp = NULL;
	/*
	 * For read_buf, check the magic number.
	 */
	if (caller == 1) {
		xfs_dir2_data_hdr_t	*hdr = rbp->data;
		xfs_dir2_free_t		*free = rbp->data;
		xfs_da_blkinfo_t	*info = rbp->data;
		uint			magic, magic1;

		magic = be16_to_cpu(info->magic);
		magic1 = be32_to_cpu(hdr->magic);
		if (unlikely(
		    XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
				   (magic != XFS_ATTR_LEAF_MAGIC) &&
				   (magic != XFS_DIR2_LEAF1_MAGIC) &&
				   (magic != XFS_DIR2_LEAFN_MAGIC) &&
				   (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
				   (magic1 != XFS_DIR2_DATA_MAGIC) &&
				   (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
				mp, XFS_ERRTAG_DA_READ_BUF,
				XFS_RANDOM_DA_READ_BUF))) {
			trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
			XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
					     XFS_ERRLEVEL_LOW, mp, info);
			error = XFS_ERROR(EFSCORRUPTED);
			xfs_da_brelse(trans, rbp);
			nbplist = 0;
			goto exit1;
		}
	}
	if (bplist) {
		kmem_free(bplist);
	}
	if (mapp != &map) {
		kmem_free(mapp);
	}
	if (bpp)
		*bpp = rbp;
	return 0;
exit1:
	if (bplist) {
		for (i = 0; i < nbplist; i++)
			xfs_trans_brelse(trans, bplist[i]);
		kmem_free(bplist);
	}
exit0:
	if (mapp != &map)
		kmem_free(mapp);
	if (bpp)
		*bpp = NULL;
	return error;
}

/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	xfs_daddr_t	mappedbno,
	xfs_dabuf_t	**bpp,
	int		whichfork)
{
	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0);
}

/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	xfs_daddr_t	mappedbno,
	xfs_dabuf_t	**bpp,
	int		whichfork)
{
	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1);
}

/*
 * Readahead the dir/attr block.
 */
xfs_daddr_t
xfs_da_reada_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	int		whichfork)
{
	xfs_daddr_t	rval;

	rval = -1;
	if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3))
		return -1;
	else
		return rval;
}

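/*
 * Usage sketch (illustrative, not part of the original source): the common
 * read-modify-log cycle for a single da block.  Passing -1 as mappedbno
 * asks xfs_da_do_buf to map the block itself; the helper name is
 * hypothetical.
 */
#if 0
static int
example_touch_block(struct xfs_da_args *args, xfs_dablk_t blkno)
{
	xfs_dabuf_t		*bp;
	xfs_da_blkinfo_t	*info;
	int			error;

	error = xfs_da_read_buf(args->trans, args->dp, blkno, -1, &bp,
				args->whichfork);
	if (error)
		return error;
	info = bp->data;
	/* ... modify the block header through info ... */
	xfs_da_log_buf(args->trans, bp, 0, sizeof(*info) - 1);
	xfs_da_buf_done(bp);
	return 0;
}
#endif
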
kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */
kmem_zone_t *xfs_dabuf_zone;		/* dabuf zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++) {
		if (state->altpath.blk[i].bp) {
			if (state->altpath.blk[i].bp != state->path.blk[i].bp)
				xfs_da_buf_done(state->altpath.blk[i].bp);
			state->altpath.blk[i].bp = NULL;
		}
	}
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	int	i;

	xfs_da_state_kill_altpath(state);
	for (i = 0; i < state->path.active; i++) {
		if (state->path.blk[i].bp)
			xfs_da_buf_done(state->path.blk[i].bp);
	}
	if (state->extravalid && state->extrablk.bp)
		xfs_da_buf_done(state->extrablk.bp);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}

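/*
 * Usage sketch (illustrative, not part of the original source): a typical
 * node-format walk allocates a state, fills in the geometry, runs the
 * lookup, and frees the state again.  The helper name is hypothetical and
 * the geometry values shown are the attribute-fork case.
 */
#if 0
static int
example_attr_node_walk(struct xfs_da_args *args)
{
	xfs_da_state_t	*state;
	int		retval, error;

	state = xfs_da_state_alloc();
	state->args = args;
	state->mp = args->dp->i_mount;
	state->blocksize = state->mp->m_sb.sb_blocksize;
	state->node_ents = state->mp->m_attr_node_ents;

	error = xfs_da_node_lookup_int(state, &retval);
	/* ... inspect state->path / state->extrablk here ... */

	xfs_da_state_free(state);
	return error ? error : retval;
}
#endif
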
/*
 * Create a dabuf.
 */
/* ARGSUSED */
STATIC xfs_dabuf_t *
xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
{
	xfs_buf_t	*bp;
	xfs_dabuf_t	*dabuf;
	int		i;
	int		off;

	if (nbuf == 1)
		dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS);
	else
		dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
	dabuf->dirty = 0;
	if (nbuf == 1) {
		dabuf->nbuf = 1;
		bp = bps[0];
		dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp));
		dabuf->data = XFS_BUF_PTR(bp);
		dabuf->bps[0] = bp;
	} else {
		dabuf->nbuf = nbuf;
		for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
			dabuf->bps[i] = bp = bps[i];
			dabuf->bbcount += BTOBB(XFS_BUF_COUNT(bp));
		}
		dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
		for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) {
			bp = bps[i];
			memcpy((char *)dabuf->data + off, XFS_BUF_PTR(bp),
				XFS_BUF_COUNT(bp));
		}
	}
	return dabuf;
}

/*
 * Un-dirty a dabuf.
 */
STATIC void
xfs_da_buf_clean(xfs_dabuf_t *dabuf)
{
	xfs_buf_t	*bp;
	int		i;
	int		off;

	if (dabuf->dirty) {
		ASSERT(dabuf->nbuf > 1);
		dabuf->dirty = 0;
		for (i = off = 0; i < dabuf->nbuf;
				i++, off += XFS_BUF_COUNT(bp)) {
			bp = dabuf->bps[i];
			memcpy(XFS_BUF_PTR(bp), (char *)dabuf->data + off,
				XFS_BUF_COUNT(bp));
		}
	}
}

/*
 * Release a dabuf.
 */
void
xfs_da_buf_done(xfs_dabuf_t *dabuf)
{
	ASSERT(dabuf);
	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if (dabuf->dirty)
		xfs_da_buf_clean(dabuf);
	if (dabuf->nbuf > 1) {
		kmem_free(dabuf->data);
		kmem_free(dabuf);
	} else {
		kmem_zone_free(xfs_dabuf_zone, dabuf);
	}
}

/*
 * Log transaction from a dabuf.
 */
void
xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
{
	xfs_buf_t	*bp;
	uint		f;
	int		i;
	uint		l;
	int		off;

	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if (dabuf->nbuf == 1) {
		ASSERT(dabuf->data == (void *)XFS_BUF_PTR(dabuf->bps[0]));
		xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
		return;
	}
	dabuf->dirty = 1;
	ASSERT(first <= last);
	for (i = off = 0; i < dabuf->nbuf; i++, off += XFS_BUF_COUNT(bp)) {
		bp = dabuf->bps[i];
		f = off;
		l = f + XFS_BUF_COUNT(bp) - 1;
		if (f < first)
			f = first;
		if (l > last)
			l = last;
		if (f <= l)
			xfs_trans_log_buf(tp, bp, f - off, l - off);
		/*
		 * B_DONE is set by xfs_trans_log_buf.
		 * If we don't set it on a new buffer (get not read)
		 * then if we don't put anything in the buffer it won't
		 * be set, and at commit it is released into the cache,
		 * and then a read will fail.
		 */
		else if (!(XFS_BUF_ISDONE(bp)))
			XFS_BUF_DONE(bp);
	}
	ASSERT(last < off);
}

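/*
 * Worked example: with two 4096-byte buffers backing the dabuf, logging
 * the byte range first = 4090, last = 4180 logs bytes 4090..4095 of the
 * first buffer and bytes 0..84 of the second, since each buffer's window
 * is clamped to [first, last] and then shifted back by its own offset.
 */
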
/*
 * Release dabuf from a transaction.
 * Have to free up the dabuf before the buffers are released,
 * since the synchronization on the dabuf is really the lock on the buffer.
 */
void
xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
{
	xfs_buf_t	*bp;
	xfs_buf_t	**bplist;
	int		i;
	int		nbuf;

	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if ((nbuf = dabuf->nbuf) == 1) {
		bplist = &bp;
		bp = dabuf->bps[0];
	} else {
		bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
		memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
	}
	xfs_da_buf_done(dabuf);
	for (i = 0; i < nbuf; i++)
		xfs_trans_brelse(tp, bplist[i]);
	if (bplist != &bp)
		kmem_free(bplist);
}

/*
 * Invalidate dabuf from a transaction.
 */
void
xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
{
	xfs_buf_t	*bp;
	xfs_buf_t	**bplist;
	int		i;
	int		nbuf;

	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if ((nbuf = dabuf->nbuf) == 1) {
		bplist = &bp;
		bp = dabuf->bps[0];
	} else {
		bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
		memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
	}
	xfs_da_buf_done(dabuf);
	for (i = 0; i < nbuf; i++)
		xfs_trans_binval(tp, bplist[i]);
	if (bplist != &bp)
		kmem_free(bplist);
}

/*
 * Get the first daddr from a dabuf.
 */
xfs_daddr_t
xfs_da_blkno(xfs_dabuf_t *dabuf)
{
	ASSERT(dabuf->nbuf);
	ASSERT(dabuf->data);
	return XFS_BUF_ADDR(dabuf->bps[0]);
}