/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"

/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */
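
/*
 * Note (added commentary): each intermediate node holds an array of
 * (hashval, before) entries sorted by hashval, where "before" is the
 * dablk number of the child covering hashvals up to and including that
 * hashval.  Blocks at each level are chained through hdr.info.forw/back,
 * and the leaves are either directory (XFS_DIR2_LEAFN_MAGIC) or
 * attribute (XFS_ATTR_LEAF_MAGIC) blocks.
 */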

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da_root_split(xfs_da_state_t *state,
			     xfs_da_state_blk_t *existing_root,
			     xfs_da_state_blk_t *new_child);
STATIC int xfs_da_node_split(xfs_da_state_t *state,
			     xfs_da_state_blk_t *existing_blk,
			     xfs_da_state_blk_t *split_blk,
			     xfs_da_state_blk_t *blk_to_add,
			     int treelevel,
			     int *result);
STATIC void xfs_da_node_rebalance(xfs_da_state_t *state,
				  xfs_da_state_blk_t *node_blk_1,
				  xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da_node_add(xfs_da_state_t *state,
			    xfs_da_state_blk_t *old_node_blk,
			    xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da_root_join(xfs_da_state_t *state,
			    xfs_da_state_blk_t *root_blk);
STATIC int xfs_da_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da_node_remove(xfs_da_state_t *state,
			       xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
				  xfs_da_state_blk_t *src_node_blk,
				  xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC uint xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count);
STATIC int xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp);
STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps);
STATIC int xfs_da_blk_unlink(xfs_da_state_t *state,
			     xfs_da_state_blk_t *drop_blk,
			     xfs_da_state_blk_t *save_blk);
STATIC void xfs_da_state_kill_altpath(xfs_da_state_t *state);

/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
				 xfs_dabuf_t **bpp, int whichfork)
{
	xfs_da_intnode_t *node;
	xfs_dabuf_t *bp;
	int error;
	xfs_trans_t *tp;

	trace_xfs_da_node_create(args);

	tp = args->trans;
	error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	node = bp->data;
	node->hdr.info.forw = 0;
	node->hdr.info.back = 0;
	node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
	node->hdr.info.pad = 0;
	node->hdr.count = 0;
	node->hdr.level = cpu_to_be16(level);
	xfs_da_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
	*bpp = bp;
	return(0);
}

/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da_split(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *oldblk, *newblk, *addblk;
	xfs_da_intnode_t *node;
	xfs_dabuf_t *bp;
	int max, action, error, i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
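	/*
	 * Note (added): "addblk" tracks the block that still has to be
	 * linked into the level above.  The loop below runs from the leaf
	 * toward the root and clears it once an insertion fits without
	 * another split; if it is still set afterwards, the root itself
	 * must be split.
	 */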
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != ENOSPC)) {
				return(error);	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return(error);	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da_node_split(state, oldblk, newblk, addblk,
							 max - i, &action);
			xfs_da_buf_done(addblk->bp);
			addblk->bp = NULL;
			if (error)
				return(error);	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da_fixhashpath(state, &state->path);
		/*
		 * If we won't need this block again, it's getting dropped
		 * from the active path by the loop control, so we need
		 * to mark it done now.
		 */
		if (i > 0 || !addblk)
			xfs_da_buf_done(oldblk->bp);
	}
	if (!addblk)
		return(0);

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da_root_split(state, oldblk, addblk);
	if (error) {
		xfs_da_buf_done(oldblk->bp);
		xfs_da_buf_done(addblk->bp);
		addblk->bp = NULL;
		return(error);	/* GROT: dir is inconsistent */
	}

	/*
	 * Update pointers to the node which used to be block 0 and
	 * just got bumped because of the addition of a new root node.
	 * There might be three blocks involved if a double split occurred,
	 * and the original block 0 could be at any position in the list.
	 */
	node = oldblk->bp->data;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->data;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_da_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	node = oldblk->bp->data;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->data;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_da_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	xfs_da_buf_done(oldblk->bp);
	xfs_da_buf_done(addblk->bp);
	addblk->bp = NULL;
	return(0);
}

/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in process.
 */
STATIC int						/* error */
xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				 xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node, *oldroot;
	xfs_da_args_t *args;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;
	int error, size;
	xfs_inode_t *dp;
	xfs_trans_t *tp;
	xfs_mount_t *mp;
	xfs_dir2_leaf_t *leaf;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	ASSERT(args != NULL);
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return(error);
	dp = args->dp;
	tp = args->trans;
	mp = state->mp;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	node = bp->data;
	oldroot = blk1->bp->data;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
		size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
			     (char *)oldroot);
	} else {
		ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
		leaf = (xfs_dir2_leaf_t *)oldroot;
		size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
			     (char *)leaf);
	}
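	/*
	 * Note (added): for directories the tree may still consist of a
	 * single DIR2_LEAFN block, so the old "root" being relocated can be
	 * either an intermediate node or a leaf; size covers only the live
	 * entries in either layout.
	 */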
	memcpy(node, oldroot, size);
	xfs_da_log_buf(tp, bp, 0, size - 1);
	xfs_da_buf_done(blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
		be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
	if (error)
		return(error);
	node = bp->data;
	node->btree[0].hashval = cpu_to_be32(blk1->hashval);
	node->btree[0].before = cpu_to_be32(blk1->blkno);
	node->btree[1].hashval = cpu_to_be32(blk2->hashval);
	node->btree[1].before = cpu_to_be32(blk2->blkno);
	node->hdr.count = cpu_to_be16(2);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= mp->m_dirleafblk &&
		       blk1->blkno < mp->m_dirfreeblk);
		ASSERT(blk2->blkno >= mp->m_dirleafblk &&
		       blk2->blkno < mp->m_dirfreeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_da_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, node->btree,
			sizeof(xfs_da_node_entry_t) * 2));
	xfs_da_buf_done(bp);

	return(0);
}

/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
				 xfs_da_state_blk_t *newblk,
				 xfs_da_state_blk_t *addblk,
				 int treelevel, int *result)
{
	xfs_da_intnode_t *node;
	xfs_dablk_t blkno;
	int newcount, error;
	int useextra;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return(error);	/* GROT: dir is inconsistent */

		error = xfs_da_node_create(state->args, blkno, treelevel,
					   &newblk->bp, state->args->whichfork);
		if (error)
			return(error);	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da_node_rebalance(state, oldblk, newblk);
		error = xfs_da_blk_link(state, oldblk, newblk);
		if (error)
			return(error);
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->data;
	if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
		oldblk->index++;
		xfs_da_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return(0);
}

/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				     xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node1, *node2, *tmpnode;
	xfs_da_node_entry_t *btree_s, *btree_d;
	int count, tmp;
	xfs_trans_t *tp;

	trace_xfs_da_node_rebalance(state->args);

	node1 = blk1->bp->data;
	node2 = blk2->bp->data;
	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
	    ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||
	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
	}
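	/*
	 * Note (added): the test above detects that node2's hashvals
	 * actually sort before node1's, so the local pointers are swapped
	 * to make node1 the lower-hashval block for the count/copy logic
	 * that follows.
	 */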
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &node2->btree[0];
			btree_d = &node2->btree[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		be16_add_cpu(&node2->hdr.count, count);
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
		btree_d = &node2->btree[0];
		memcpy(btree_d, btree_s, tmp);
		be16_add_cpu(&node1->hdr.count, -count);
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[0];
		btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
		memcpy(btree_d, btree_s, tmp);
		be16_add_cpu(&node1->hdr.count, count);
		xfs_da_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp = be16_to_cpu(node2->hdr.count) - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[count];
		btree_d = &node2->btree[0];
		memmove(btree_d, btree_s, tmp);
		be16_add_cpu(&node2->hdr.count, -count);
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	xfs_da_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
	xfs_da_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
			sizeof(node2->hdr) +
			sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	node1 = blk1->bp->data;
	node2 = blk2->bp->data;
	blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
	blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= be16_to_cpu(node1->hdr.count)) {
		blk2->index = blk1->index - be16_to_cpu(node1->hdr.count);
		blk1->index = be16_to_cpu(node1->hdr.count) + 1;	/* make it invalid */
	}
}

/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
			       xfs_da_state_blk_t *newblk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
		       newblk->blkno < state->mp->m_dirfreeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	btree = &node->btree[ oldblk->index ];
	if (oldblk->index < be16_to_cpu(node->hdr.count)) {
		tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);
		memmove(btree + 1, btree, tmp);
	}
	btree->hashval = cpu_to_be32(newblk->hashval);
	btree->before = cpu_to_be32(newblk->blkno);
	xfs_da_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
	be16_add_cpu(&node->hdr.count, 1);
	xfs_da_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);
}

/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da_join(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *drop_blk, *save_blk;
	int action, error;

	trace_xfs_da_join(state->args);

	action = 0;
	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr_leaf_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return(0);
			xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da_node_remove(state, drop_blk);
			xfs_da_fixhashpath(state, &state->path);
			error = xfs_da_node_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return 0;
			xfs_da_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da_fixhashpath(state, &state->altpath);
		error = xfs_da_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return(error);
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return(error);
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da_node_remove(state, drop_blk);
	xfs_da_fixhashpath(state, &state->path);
	error = xfs_da_root_join(state, &state->path.blk[0]);
	return(error);
}

#ifdef DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16 magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
	} else
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */

/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
{
	xfs_da_intnode_t *oldroot;
	xfs_da_args_t *args;
	xfs_dablk_t child;
	xfs_dabuf_t *bp;
	int error;

	trace_xfs_da_root_join(state->args);

	args = state->args;
	ASSERT(args != NULL);
	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
	oldroot = root_blk->bp->data;
	ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(!oldroot->hdr.info.forw);
	ASSERT(!oldroot->hdr.info.back);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (be16_to_cpu(oldroot->hdr.count) > 1)
		return(0);

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	child = be32_to_cpu(oldroot->btree[0].before);
	ASSERT(child != 0);
	error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp,
					     args->whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	xfs_da_blkinfo_onlychild_validate(bp->data,
					be16_to_cpu(oldroot->hdr.level));

	memcpy(root_blk->bp->data, bp->data, state->blocksize);
	xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return(error);
}

/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
{
	xfs_da_intnode_t *node;
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	int count, forward, error, retval, i;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->data;
	ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	node = (xfs_da_intnode_t *)info;
	count = be16_to_cpu(node->hdr.count);
	if (count > (state->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return(0);	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return(0);
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	/* start with smaller blk num */
	forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
	for (i = 0; i < 2; forward = !forward, i++) {
		if (forward)
			blkno = be32_to_cpu(info->forw);
		else
			blkno = be32_to_cpu(info->back);
		if (blkno == 0)
			continue;
		error = xfs_da_read_buf(state->args->trans, state->args->dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return(error);
		ASSERT(bp != NULL);

		node = (xfs_da_intnode_t *)info;
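		/*
		 * Note (added): start from 75% of the per-node entry
		 * capacity (node_ents minus a quarter of node_ents) and
		 * subtract both blocks' entry counts; count stays
		 * non-negative only if a merged node would keep at least
		 * 25% free space.
		 */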
		count  = state->node_ents;
		count -= state->node_ents >> 2;
		count -= be16_to_cpu(node->hdr.count);
		node = bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		count -= be16_to_cpu(node->hdr.count);
		xfs_da_brelse(state->args->trans, bp);
		if (count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return(0);
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error) {
			return(error);
		}
		if (retval) {
			*action = 0;
			return(0);
		}
	} else {
		error = xfs_da_path_shift(state, &state->path, forward,
						 0, &retval);
		if (error) {
			return(error);
		}
		if (retval) {
			*action = 0;
			return(0);
		}
	}
	*action = 1;
	return(0);
}

/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
{
	xfs_da_state_blk_t *blk;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dahash_t lasthash=0;
	int level, count;

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da_node_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		node = blk->bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		btree = &node->btree[ blk->index ];
		if (be32_to_cpu(btree->hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree->hashval = cpu_to_be32(lasthash);
		xfs_da_log_buf(state->args->trans, blk->bp,
			XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));

		lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
	}
}

/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->data;
	ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	btree = &node->btree[drop_blk->index];
	if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) {
		tmp  = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(btree, btree + 1, tmp);
		xfs_da_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, btree, tmp));
		btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
	}
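	/*
	 * Note (added): btree now points at the slot being retired, either
	 * the removed entry itself or the stale copy of the last entry left
	 * behind by the memmove; it is zeroed and logged before the count
	 * is decremented.
	 */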
	memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
	xfs_da_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
	be16_add_cpu(&node->hdr.count, -1);
	xfs_da_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	btree--;
	drop_blk->hashval = be32_to_cpu(btree->hashval);
}

/*
 * Unbalance the btree elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				     xfs_da_state_blk_t *save_blk)
{
	xfs_da_intnode_t *drop_node, *save_node;
	xfs_da_node_entry_t *btree;
	int tmp;
	xfs_trans_t *tp;

	trace_xfs_da_node_unbalance(state->args);

	drop_node = drop_blk->bp->data;
	save_node = save_blk->bp->data;
	ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
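	/*
	 * Note (added): as in xfs_da_node_rebalance(), the comparison below
	 * checks whether drop_node's hashvals sort before save_node's; if
	 * so, the incoming entries must be prepended rather than appended.
	 */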
	if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) ||
	    (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) <
	     be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval)))
	{
		btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)];
		tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
		memmove(btree, &save_node->btree[0], tmp);
		btree = &save_node->btree[0];
		xfs_da_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, btree,
				(be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
				sizeof(xfs_da_node_entry_t)));
	} else {
		btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
		xfs_da_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, btree,
				be16_to_cpu(drop_node->hdr.count) *
				sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(btree, &drop_node->btree[0], tmp);
	be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));

	xfs_da_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
			sizeof(save_node->hdr)));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval);
}

/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 */
int							/* error */
xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
{
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *curr;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dablk_t blkno;
	int probe, span, max, error, retval;
	xfs_dahash_t hashval, btreehashval;
	xfs_da_args_t *args;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da_read_buf(args->trans, args->dp, blkno,
					-1, &blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return(error);
		}
		curr = blk->bp->data;
		blk->magic = be16_to_cpu(curr->magic);
		ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
		       blk->magic == XFS_DIR2_LEAFN_MAGIC ||
		       blk->magic == XFS_ATTR_LEAF_MAGIC);

		/*
		 * Search an intermediate node for a match.
		 */
		if (blk->magic == XFS_DA_NODE_MAGIC) {
			node = blk->bp->data;
			max = be16_to_cpu(node->hdr.count);
			blk->hashval = be32_to_cpu(node->btree[max-1].hashval);

			/*
			 * Binary search.  (note: small blocks will skip loop)
			 */
			probe = span = max / 2;
			hashval = args->hashval;
			for (btree = &node->btree[probe]; span > 4;
				   btree = &node->btree[probe]) {
				span /= 2;
				btreehashval = be32_to_cpu(btree->hashval);
				if (btreehashval < hashval)
					probe += span;
				else if (btreehashval > hashval)
					probe -= span;
				else
					break;
			}
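			/*
			 * Note (added): the binary search stops once the
			 * span drops to 4 entries (or on an exact hit); the
			 * two linear scans below then settle on the first
			 * entry whose hashval is >= the search value, which
			 * also handles duplicate hashvals.
			 */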
			ASSERT((probe >= 0) && (probe < max));
			ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval));

			/*
			 * Since we may have duplicate hashval's, find the first
			 * matching hashval in the node.
			 */
			while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) {
				btree--;
				probe--;
			}
			while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) {
				btree++;
				probe++;
			}

			/*
			 * Pick the right block to descend on.
			 */
			if (probe == max) {
				blk->index = max-1;
				blkno = be32_to_cpu(node->btree[max-1].before);
			} else {
				blk->index = probe;
				blkno = be32_to_cpu(btree->before);
			}
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		} else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
			break;
		}
	}

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return XFS_ERROR(EFSCORRUPTED);
		}
		if (((retval == ENOENT) || (retval == ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da_path_shift(state, &state->path, 1, 1,
							 &retval);
			if (error)
				return(error);
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = XFS_ERROR(ENOATTR);
			}
		}
		break;
	}
	*result = retval;
	return(0);
}

/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
int							/* error */
xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
			       xfs_da_state_blk_t *new_blk)
{
	xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
	xfs_da_args_t *args;
	int before=0, error;
	xfs_dabuf_t *bp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->data;
	new_info = new_blk->bp->data;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(old_blk->magic == be16_to_cpu(old_info->magic));
	ASSERT(new_blk->magic == be16_to_cpu(new_info->magic));
	ASSERT(old_blk->magic == new_blk->magic);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da_node_order(old_blk->bp, new_blk->bp);
		break;
	}
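	/*
	 * Note (added): the *_order() helpers return nonzero when the
	 * second block's hashvals sort before the first's, i.e. the new
	 * block must be linked in front of the existing one.
	 */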
	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		trace_xfs_da_link_before(args);
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(old_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
			xfs_da_buf_done(bp);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		trace_xfs_da_link_after(args);
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(old_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
			xfs_da_buf_done(bp);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return(0);
}

/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
{
	xfs_da_intnode_t *node1, *node2;

	node1 = node1_bp->data;
	node2 = node2_bp->data;
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
	       node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
	    ((be32_to_cpu(node2->btree[0].hashval) <
	      be32_to_cpu(node1->btree[0].hashval)) ||
	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
		return(1);
	}
	return(0);
}

/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
{
	xfs_da_intnode_t *node;

	node = bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	if (count)
		*count = be16_to_cpu(node->hdr.count);
	if (!node->hdr.count)
		return(0);
	return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
}

/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				 xfs_da_state_blk_t *save_blk)
{
	xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
	xfs_da_args_t *args;
	xfs_dabuf_t *bp;
	int error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->data;
	drop_info = drop_blk->bp->data;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == be16_to_cpu(save_info->magic));
	ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic));
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		trace_xfs_da_unlink_back(args);
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0,
				sizeof(*tmp_info) - 1);
			xfs_da_buf_done(bp);
		}
	} else {
		trace_xfs_da_unlink_forward(args);
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0,
				sizeof(*tmp_info) - 1);
			xfs_da_buf_done(bp);
		}
	}

	xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return(0);
}
/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvalues) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
int                                                     /* error */
xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
                  int forward, int release, int *result)
{
        xfs_da_state_blk_t *blk;
        xfs_da_blkinfo_t *info;
        xfs_da_intnode_t *node;
        xfs_da_args_t *args;
        xfs_dablk_t blkno=0;
        int level, error;

        /*
         * Roll up the Btree looking for the first block where our
         * current index is not at the edge of the block.  Note that
         * we skip the bottom layer because we want the sibling block.
         */
        args = state->args;
        ASSERT(args != NULL);
        ASSERT(path != NULL);
        ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
        level = (path->active-1) - 1;   /* skip bottom layer in path */
        for (blk = &path->blk[level]; level >= 0; blk--, level--) {
                ASSERT(blk->bp != NULL);
                node = blk->bp->data;
                ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
                if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
                        blk->index++;
                        blkno = be32_to_cpu(node->btree[blk->index].before);
                        break;
                } else if (!forward && (blk->index > 0)) {
                        blk->index--;
                        blkno = be32_to_cpu(node->btree[blk->index].before);
                        break;
                }
        }
        if (level < 0) {
                *result = XFS_ERROR(ENOENT);    /* we're out of our tree */
                ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
                return(0);
        }

        /*
         * Roll down the edge of the subtree until we reach the
         * same depth we were at originally.
         */
        for (blk++, level++; level < path->active; blk++, level++) {
                /*
                 * Release the old block.
                 * (if it's dirty, trans won't actually let go)
                 */
                if (release)
                        xfs_da_brelse(args->trans, blk->bp);

                /*
                 * Read the next child block.
                 */
                blk->blkno = blkno;
                error = xfs_da_read_buf(args->trans, args->dp, blkno, -1,
                                        &blk->bp, args->whichfork);
                if (error)
                        return(error);
                ASSERT(blk->bp != NULL);
                info = blk->bp->data;
                ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
                       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
                       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
                blk->magic = be16_to_cpu(info->magic);
                if (blk->magic == XFS_DA_NODE_MAGIC) {
                        node = (xfs_da_intnode_t *)info;
                        blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
                        if (forward)
                                blk->index = 0;
                        else
                                blk->index = be16_to_cpu(node->hdr.count)-1;
                        blkno = be32_to_cpu(node->btree[blk->index].before);
                } else {
                        ASSERT(level == path->active-1);
                        blk->index = 0;
                        switch(blk->magic) {
                        case XFS_ATTR_LEAF_MAGIC:
                                blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
                                                                      NULL);
                                break;
                        case XFS_DIR2_LEAFN_MAGIC:
                                blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
                                                                       NULL);
                                break;
                        default:
                                ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC ||
                                       blk->magic == XFS_DIR2_LEAFN_MAGIC);
                                break;
                        }
                }
        }
        *result = 0;
        return(0);
}
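
/*
 * Editorial sketch (not part of the original file): one plausible way a
 * caller could walk the leaf level with xfs_da_path_shift(), shifting
 * "forward" until *result comes back as ENOENT.  The state/path setup and
 * the per-leaf work are assumed to be done elsewhere.
 */
#if 0
        int result, error;

        do {
                /* ... process state->path.blk[state->path.active - 1] ... */
                error = xfs_da_path_shift(state, &state->path, 1, 1, &result);
                if (error)
                        return error;
        } while (result == 0);  /* result == ENOENT: ran off the tree edge */
#endif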
/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
        xfs_dahash_t hash;

        /*
         * Do four characters at a time as long as we can.
         */
        for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
                hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
                       (name[3] << 0) ^ rol32(hash, 7 * 4);

        /*
         * Now do the rest of the characters.
         */
        switch (namelen) {
        case 3:
                return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
                       rol32(hash, 7 * 3);
        case 2:
                return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
        case 1:
                return (name[0] << 0) ^ rol32(hash, 7 * 1);
        default: /* case 0: */
                return hash;
        }
}
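
/*
 * Editorial example (not part of the original file): for a five-byte name
 * the unrolled loop above folds in "hell" four bytes at a time and the
 * switch handles the trailing 'o'.  For a one-byte name the loop is skipped
 * entirely, so xfs_da_hashname((const __uint8_t *)"a", 1) == 0x61 ('a').
 */
#if 0
        xfs_dahash_t hash = xfs_da_hashname((const __uint8_t *)"hello", 5);
#endif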
enum xfs_dacmp
xfs_da_compname(
        struct xfs_da_args      *args,
        const unsigned char     *name,
        int                     len)
{
        return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
                                        XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}

static xfs_dahash_t
xfs_default_hashname(
        struct xfs_name         *name)
{
        return xfs_da_hashname(name->name, name->len);
}

const struct xfs_nameops xfs_default_nameops = {
        .hashname       = xfs_default_hashname,
        .compname       = xfs_da_compname
};
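
/*
 * Editorial sketch (not part of the original file): callers normally reach
 * hashname/compname through the per-mount ops vector rather than calling
 * the defaults directly, so case-insensitive variants can be substituted.
 * mp, args, name and dep are assumed to be in scope; m_dirnameops is
 * assumed to point at xfs_default_nameops on a case-sensitive filesystem.
 */
#if 0
        enum xfs_dacmp cmp;

        args->hashval = mp->m_dirnameops->hashname(name);
        cmp = mp->m_dirnameops->compname(args, dep->name, dep->namelen);
#endif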
int
xfs_da_grow_inode_int(
        struct xfs_da_args      *args,
        xfs_fileoff_t           *bno,
        int                     count)
{
        struct xfs_trans        *tp = args->trans;
        struct xfs_inode        *dp = args->dp;
        int                     w = args->whichfork;
        xfs_drfsbno_t           nblks = dp->i_d.di_nblocks;
        struct xfs_bmbt_irec    map, *mapp;
        int                     nmap, error, got, i, mapi;

        /*
         * Find a spot in the file space to put the new block.
         */
        error = xfs_bmap_first_unused(tp, dp, count, bno, w);
        if (error)
                return error;

        /*
         * Try mapping it in one filesystem block.
         */
        nmap = 1;
        ASSERT(args->firstblock != NULL);
        error = xfs_bmapi_write(tp, dp, *bno, count,
                        xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
                        args->firstblock, args->total, &map, &nmap,
                        args->flist);
        if (error)
                return error;

        ASSERT(nmap <= 1);
        if (nmap == 1) {
                mapp = &map;
                mapi = 1;
        } else if (nmap == 0 && count > 1) {
                xfs_fileoff_t           b;
                int                     c;

                /*
                 * If we didn't get it and the block might work if fragmented,
                 * try without the CONTIG flag.  Loop until we get it all.
                 */
                mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
                for (b = *bno, mapi = 0; b < *bno + count; ) {
                        nmap = MIN(XFS_BMAP_MAX_NMAP, count);
                        c = (int)(*bno + count - b);
                        error = xfs_bmapi_write(tp, dp, b, c,
                                        xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
                                        args->firstblock, args->total,
                                        &mapp[mapi], &nmap, args->flist);
                        if (error)
                                goto out_free_map;
                        if (nmap < 1)
                                break;
                        mapi += nmap;
                        b = mapp[mapi - 1].br_startoff +
                            mapp[mapi - 1].br_blockcount;
                }
        } else {
                mapi = 0;
                mapp = NULL;
        }

        /*
         * Count the blocks we got, make sure it matches the total.
         */
        for (i = 0, got = 0; i < mapi; i++)
                got += mapp[i].br_blockcount;
        if (got != count || mapp[0].br_startoff != *bno ||
            mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
            *bno + count) {
                error = XFS_ERROR(ENOSPC);
                goto out_free_map;
        }

        /* account for newly allocated blocks in reserved blocks total */
        args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
        if (mapp != &map)
                kmem_free(mapp);
        return error;
}
/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
        struct xfs_da_args      *args,
        xfs_dablk_t             *new_blkno)
{
        xfs_fileoff_t           bno;
        int                     count;
        int                     error;

        trace_xfs_da_grow_inode(args);

        if (args->whichfork == XFS_DATA_FORK) {
                bno = args->dp->i_mount->m_dirleafblk;
                count = args->dp->i_mount->m_dirblkfsbs;
        } else {
                bno = 0;
                count = 1;
        }

        error = xfs_da_grow_inode_int(args, &bno, count);
        if (!error)
                *new_blkno = (xfs_dablk_t)bno;
        return error;
}
/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed, since removing it
 * cannot cause a bmap btree split.
 */
STATIC int
xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
                      xfs_dabuf_t **dead_bufp)
{
        xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
        xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf;
        xfs_fileoff_t lastoff;
        xfs_inode_t *ip;
        xfs_trans_t *tp;
        xfs_mount_t *mp;
        int error, w, entno, level, dead_level;
        xfs_da_blkinfo_t *dead_info, *sib_info;
        xfs_da_intnode_t *par_node, *dead_node;
        xfs_dir2_leaf_t *dead_leaf2;
        xfs_dahash_t dead_hash;

        trace_xfs_da_swap_lastblock(args);

        dead_buf = *dead_bufp;
        dead_blkno = *dead_blknop;
        tp = args->trans;
        ip = args->dp;
        w = args->whichfork;
        ASSERT(w == XFS_DATA_FORK);
        mp = ip->i_mount;
        lastoff = mp->m_dirfreeblk;
        error = xfs_bmap_last_before(tp, ip, &lastoff, w);
        if (error)
                return error;
        if (unlikely(lastoff == 0)) {
                XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
                                 mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        /*
         * Read the last block in the btree space.
         */
        last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
        if ((error = xfs_da_read_buf(tp, ip, last_blkno, -1, &last_buf, w)))
                return error;
        /*
         * Copy the last block into the dead buffer and log it.
         */
        memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize);
        xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
        dead_info = dead_buf->data;
        /*
         * Get values from the moved block.
         */
        if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
                dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
                dead_level = 0;
                dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
        } else {
                ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
                dead_node = (xfs_da_intnode_t *)dead_info;
                dead_level = be16_to_cpu(dead_node->hdr.level);
                dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
        }
        sib_buf = par_buf = NULL;
        /*
         * If the moved block has a left sibling, fix up the pointers.
         */
        if ((sib_blkno = be32_to_cpu(dead_info->back))) {
                if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
                        goto done;
                sib_info = sib_buf->data;
                if (unlikely(
                    be32_to_cpu(sib_info->forw) != last_blkno ||
                    sib_info->magic != dead_info->magic)) {
                        XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
                                         XFS_ERRLEVEL_LOW, mp);
                        error = XFS_ERROR(EFSCORRUPTED);
                        goto done;
                }
                sib_info->forw = cpu_to_be32(dead_blkno);
                xfs_da_log_buf(tp, sib_buf,
                        XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
                                        sizeof(sib_info->forw)));
                xfs_da_buf_done(sib_buf);
                sib_buf = NULL;
        }
        /*
         * If the moved block has a right sibling, fix up the pointers.
         */
        if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
                if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
                        goto done;
                sib_info = sib_buf->data;
                if (unlikely(
                    be32_to_cpu(sib_info->back) != last_blkno ||
                    sib_info->magic != dead_info->magic)) {
                        XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
                                         XFS_ERRLEVEL_LOW, mp);
                        error = XFS_ERROR(EFSCORRUPTED);
                        goto done;
                }
                sib_info->back = cpu_to_be32(dead_blkno);
                xfs_da_log_buf(tp, sib_buf,
                        XFS_DA_LOGRANGE(sib_info, &sib_info->back,
                                        sizeof(sib_info->back)));
                xfs_da_buf_done(sib_buf);
                sib_buf = NULL;
        }
        par_blkno = mp->m_dirleafblk;
        level = -1;
        /*
         * Walk down the tree looking for the parent of the moved block.
         */
        for (;;) {
                if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
                        goto done;
                par_node = par_buf->data;
                if (unlikely(par_node->hdr.info.magic !=
                    cpu_to_be16(XFS_DA_NODE_MAGIC) ||
                    (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
                        XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
                                         XFS_ERRLEVEL_LOW, mp);
                        error = XFS_ERROR(EFSCORRUPTED);
                        goto done;
                }
                level = be16_to_cpu(par_node->hdr.level);
                for (entno = 0;
                     entno < be16_to_cpu(par_node->hdr.count) &&
                     be32_to_cpu(par_node->btree[entno].hashval) < dead_hash;
                     entno++)
                        continue;
                if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) {
                        XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
                                         XFS_ERRLEVEL_LOW, mp);
                        error = XFS_ERROR(EFSCORRUPTED);
                        goto done;
                }
                par_blkno = be32_to_cpu(par_node->btree[entno].before);
                if (level == dead_level + 1)
                        break;
                xfs_da_brelse(tp, par_buf);
                par_buf = NULL;
        }
        /*
         * We're in the right parent block.
         * Look for the entry pointing to the last block.
         */
        for (;;) {
                for (;
                     entno < be16_to_cpu(par_node->hdr.count) &&
                     be32_to_cpu(par_node->btree[entno].before) != last_blkno;
                     entno++)
                        continue;
                if (entno < be16_to_cpu(par_node->hdr.count))
                        break;
                par_blkno = be32_to_cpu(par_node->hdr.info.forw);
                xfs_da_brelse(tp, par_buf);
                par_buf = NULL;
                if (unlikely(par_blkno == 0)) {
                        XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
                                         XFS_ERRLEVEL_LOW, mp);
                        error = XFS_ERROR(EFSCORRUPTED);
                        goto done;
                }
                if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
                        goto done;
                par_node = par_buf->data;
                if (unlikely(
                    be16_to_cpu(par_node->hdr.level) != level ||
                    par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
                        XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
                                         XFS_ERRLEVEL_LOW, mp);
                        error = XFS_ERROR(EFSCORRUPTED);
                        goto done;
                }
                entno = 0;
        }
        /*
         * Update the parent entry pointing to the moved block.
         */
        par_node->btree[entno].before = cpu_to_be32(dead_blkno);
        xfs_da_log_buf(tp, par_buf,
                XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
                                sizeof(par_node->btree[entno].before)));
        xfs_da_buf_done(par_buf);
        xfs_da_buf_done(dead_buf);
        *dead_blknop = last_blkno;
        *dead_bufp = last_buf;
        return 0;
done:
        if (par_buf)
                xfs_da_brelse(tp, par_buf);
        if (sib_buf)
                xfs_da_brelse(tp, sib_buf);
        xfs_da_brelse(tp, last_buf);
        return error;
}
/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
                    xfs_dabuf_t *dead_buf)
{
        xfs_inode_t *dp;
        int done, error, w, count;
        xfs_trans_t *tp;
        xfs_mount_t *mp;

        trace_xfs_da_shrink_inode(args);

        dp = args->dp;
        w = args->whichfork;
        tp = args->trans;
        mp = dp->i_mount;
        if (w == XFS_DATA_FORK)
                count = mp->m_dirblkfsbs;
        else
                count = 1;
        for (;;) {
                /*
                 * Remove extents.  If we get ENOSPC for a dir we have to move
                 * the last block to the place we want to kill.
                 */
                if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
                                xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
                                0, args->firstblock, args->flist,
                                &done)) == ENOSPC) {
                        if (w != XFS_DATA_FORK)
                                break;
                        if ((error = xfs_da_swap_lastblock(args, &dead_blkno,
                                        &dead_buf)))
                                break;
                } else {
                        break;
                }
        }
        xfs_da_binval(tp, dead_buf);
        return error;
}
/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
        int             nmap,
        xfs_bmbt_irec_t *mapp,
        xfs_dablk_t     bno,
        int             count)
{
        int             i;
        xfs_fileoff_t   off;

        for (i = 0, off = bno; i < nmap; i++) {
                if (mapp[i].br_startblock == HOLESTARTBLOCK ||
                    mapp[i].br_startblock == DELAYSTARTBLOCK) {
                        return 0;
                }
                if (off != mapp[i].br_startoff) {
                        return 0;
                }
                off += mapp[i].br_blockcount;
        }
        return off == bno + count;
}
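
/*
 * Editorial example (not part of the original file): for bno = 0 and
 * count = 4, two mappings { br_startoff 0, br_blockcount 2 } and
 * { br_startoff 2, br_blockcount 2 } pass (off advances 0 -> 2 -> 4,
 * which equals bno + count), while a second mapping starting at 3 would
 * fail the off != br_startoff check, i.e. the range has a hole in it.
 */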
/*
 * Make a dabuf.
 * Used for get_buf (caller 0), read_buf (caller 1, with magic-number
 * checking), reads without the magic-number check (caller 2), and
 * reada_buf (caller 3).
 */
STATIC int
xfs_da_do_buf(
        xfs_trans_t     *trans,
        xfs_inode_t     *dp,
        xfs_dablk_t     bno,
        xfs_daddr_t     *mappedbnop,
        xfs_dabuf_t     **bpp,
        int             whichfork,
        int             caller)
{
        xfs_buf_t       *bp = NULL;
        xfs_buf_t       **bplist;
        int             error=0;
        int             i;
        xfs_bmbt_irec_t map;
        xfs_bmbt_irec_t *mapp;
        xfs_daddr_t     mappedbno;
        xfs_mount_t     *mp;
        int             nbplist=0;
        int             nfsb;
        int             nmap;
        xfs_dabuf_t     *rbp;

        mp = dp->i_mount;
        nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
        mappedbno = *mappedbnop;
        /*
         * Caller doesn't have a mapping.  -2 means don't complain
         * if we land in a hole.
         */
        if (mappedbno == -1 || mappedbno == -2) {
                /*
                 * Optimize the one-block case.
                 */
                if (nfsb == 1)
                        mapp = &map;
                else
                        mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);

                nmap = nfsb;
                error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, mapp,
                                       &nmap, xfs_bmapi_aflag(whichfork));
                if (error)
                        goto exit0;
        } else {
                map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
                map.br_startoff = (xfs_fileoff_t)bno;
                map.br_blockcount = nfsb;
                mapp = &map;
                nmap = 1;
        }
        if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) {
                error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
                if (unlikely(error == EFSCORRUPTED)) {
                        if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
                                xfs_alert(mp, "%s: bno %lld dir: inode %lld",
                                        __func__, (long long)bno,
                                        (long long)dp->i_ino);
                                for (i = 0; i < nmap; i++) {
                                        xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
                                                i,
                                                (long long)mapp[i].br_startoff,
                                                (long long)mapp[i].br_startblock,
                                                (long long)mapp[i].br_blockcount,
                                                mapp[i].br_state);
                                }
                        }
                        XFS_ERROR_REPORT("xfs_da_do_buf(1)",
                                         XFS_ERRLEVEL_LOW, mp);
                }
                goto exit0;
        }
        if (caller != 3 && nmap > 1) {
                bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP);
                nbplist = 0;
        } else
                bplist = NULL;
        /*
         * Turn the mapping(s) into buffer(s).
         */
        for (i = 0; i < nmap; i++) {
                int     nmapped;

                mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock);
                if (i == 0)
                        *mappedbnop = mappedbno;
                nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount);
                switch (caller) {
                case 0:
                        bp = xfs_trans_get_buf(trans, mp->m_ddev_targp,
                                               mappedbno, nmapped, 0);
                        error = bp ? bp->b_error : XFS_ERROR(EIO);
                        break;
                case 1:
                case 2:
                        bp = NULL;
                        error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp,
                                                   mappedbno, nmapped, 0, &bp);
                        break;
                case 3:
                        xfs_buf_readahead(mp->m_ddev_targp, mappedbno, nmapped);
                        error = 0;
                        bp = NULL;
                        break;
                }
                if (error) {
                        if (bp)
                                xfs_trans_brelse(trans, bp);
                        goto exit1;
                }
                if (!bp)
                        continue;
                if (caller == 1) {
                        if (whichfork == XFS_ATTR_FORK)
                                xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
                        else
                                xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
                }
                if (bplist) {
                        bplist[nbplist++] = bp;
                }
        }
        /*
         * Build a dabuf structure.
         */
        if (bplist) {
                rbp = xfs_da_buf_make(nbplist, bplist);
        } else if (bp)
                rbp = xfs_da_buf_make(1, &bp);
        else
                rbp = NULL;
        /*
         * For read_buf, check the magic number.
         */
        if (caller == 1) {
                xfs_dir2_data_hdr_t     *hdr = rbp->data;
                xfs_dir2_free_t         *free = rbp->data;
                xfs_da_blkinfo_t        *info = rbp->data;
                uint                    magic, magic1;

                magic = be16_to_cpu(info->magic);
                magic1 = be32_to_cpu(hdr->magic);
                if (unlikely(
                    XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
                                   (magic != XFS_ATTR_LEAF_MAGIC) &&
                                   (magic != XFS_DIR2_LEAF1_MAGIC) &&
                                   (magic != XFS_DIR2_LEAFN_MAGIC) &&
                                   (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
                                   (magic1 != XFS_DIR2_DATA_MAGIC) &&
                                   (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
                                mp, XFS_ERRTAG_DA_READ_BUF,
                                XFS_RANDOM_DA_READ_BUF))) {
                        trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
                        XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
                                             XFS_ERRLEVEL_LOW, mp, info);
                        error = XFS_ERROR(EFSCORRUPTED);
                        xfs_da_brelse(trans, rbp);
                        nbplist = 0;
                        goto exit1;
                }
        }
        if (bplist) {
                kmem_free(bplist);
        }
        if (mapp != &map) {
                kmem_free(mapp);
        }
        if (bpp)
                *bpp = rbp;
        return 0;
exit1:
        if (bplist) {
                for (i = 0; i < nbplist; i++)
                        xfs_trans_brelse(trans, bplist[i]);
                kmem_free(bplist);
        }
exit0:
        if (mapp != &map)
                kmem_free(mapp);
        if (bpp)
                *bpp = NULL;
        return error;
}
/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
        xfs_trans_t     *trans,
        xfs_inode_t     *dp,
        xfs_dablk_t     bno,
        xfs_daddr_t     mappedbno,
        xfs_dabuf_t     **bpp,
        int             whichfork)
{
        return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0);
}

/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
        xfs_trans_t     *trans,
        xfs_inode_t     *dp,
        xfs_dablk_t     bno,
        xfs_daddr_t     mappedbno,
        xfs_dabuf_t     **bpp,
        int             whichfork)
{
        return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1);
}

/*
 * Readahead the dir/attr block.
 */
xfs_daddr_t
xfs_da_reada_buf(
        xfs_trans_t     *trans,
        xfs_inode_t     *dp,
        xfs_dablk_t     bno,
        int             whichfork)
{
        xfs_daddr_t     rval;

        rval = -1;
        if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3))
                return -1;
        else
                return rval;
}
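
/*
 * Editorial sketch (not part of the original file): the read-modify-log
 * cycle these helpers are built for, as used by e.g. xfs_da_blk_unlink()
 * above.  tp, dp, blkno and mp are assumed to be set up by the caller.
 */
#if 0
        xfs_dabuf_t *bp;
        int error;

        error = xfs_da_read_buf(tp, dp, blkno, -1, &bp, XFS_DATA_FORK);
        if (error)
                return error;
        /* ... modify bp->data in place ... */
        xfs_da_log_buf(tp, bp, 0, mp->m_dirblksize - 1);
        xfs_da_buf_done(bp);
#endif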
kmem_zone_t *xfs_da_state_zone;         /* anchor for state struct zone */
kmem_zone_t *xfs_dabuf_zone;            /* dabuf zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
        return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
        int     i;

        for (i = 0; i < state->altpath.active; i++) {
                if (state->altpath.blk[i].bp) {
                        if (state->altpath.blk[i].bp != state->path.blk[i].bp)
                                xfs_da_buf_done(state->altpath.blk[i].bp);
                        state->altpath.blk[i].bp = NULL;
                }
        }
        state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
        int     i;

        xfs_da_state_kill_altpath(state);
        for (i = 0; i < state->path.active; i++) {
                if (state->path.blk[i].bp)
                        xfs_da_buf_done(state->path.blk[i].bp);
        }
        if (state->extravalid && state->extrablk.bp)
                xfs_da_buf_done(state->extrablk.bp);
#ifdef DEBUG
        memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
        kmem_zone_free(xfs_da_state_zone, state);
}
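
/*
 * Editorial sketch (not part of the original file): the alloc/free pairing
 * around a node operation, as done by the dir2 and attr node code.  The
 * state fields filled in here (args, mp) are assumed from xfs_da_btree.h.
 */
#if 0
        xfs_da_state_t *state;

        state = xfs_da_state_alloc();
        state->args = args;
        state->mp = args->dp->i_mount;
        /* ... xfs_da_node_lookup_int(state, &retval) etc. ... */
        xfs_da_state_free(state);
#endif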
/*
 * Create a dabuf.
 */
/* ARGSUSED */
STATIC xfs_dabuf_t *
xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
{
        xfs_buf_t       *bp;
        xfs_dabuf_t     *dabuf;
        int             i;
        int             off;

        if (nbuf == 1)
                dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS);
        else
                dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
        dabuf->dirty = 0;
        if (nbuf == 1) {
                dabuf->nbuf = 1;
                bp = bps[0];
                dabuf->bbcount = bp->b_length;
                dabuf->data = bp->b_addr;
                dabuf->bps[0] = bp;
        } else {
                dabuf->nbuf = nbuf;
                for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
                        dabuf->bps[i] = bp = bps[i];
                        dabuf->bbcount += bp->b_length;
                }
                dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
                for (i = off = 0; i < nbuf; i++, off += BBTOB(bp->b_length)) {
                        bp = bps[i];
                        memcpy((char *)dabuf->data + off, bp->b_addr,
                               BBTOB(bp->b_length));
                }
        }
        return dabuf;
}
/*
 * Un-dirty a dabuf.
 */
STATIC void
xfs_da_buf_clean(xfs_dabuf_t *dabuf)
{
        xfs_buf_t       *bp;
        int             i;
        int             off;

        if (dabuf->dirty) {
                ASSERT(dabuf->nbuf > 1);
                dabuf->dirty = 0;
                for (i = off = 0; i < dabuf->nbuf;
                                i++, off += BBTOB(bp->b_length)) {
                        bp = dabuf->bps[i];
                        memcpy(bp->b_addr, dabuf->data + off,
                               BBTOB(bp->b_length));
                }
        }
}
/*
 * Release a dabuf.
 */
void
xfs_da_buf_done(xfs_dabuf_t *dabuf)
{
        ASSERT(dabuf);
        ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
        if (dabuf->dirty)
                xfs_da_buf_clean(dabuf);
        if (dabuf->nbuf > 1) {
                kmem_free(dabuf->data);
                kmem_free(dabuf);
        } else {
                kmem_zone_free(xfs_dabuf_zone, dabuf);
        }
}
/*
 * Log changes from a dabuf into the transaction.
 */
void
xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
{
        xfs_buf_t       *bp;
        uint            f;
        int             i;
        uint            l;
        int             off;

        ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
        if (dabuf->nbuf == 1) {
                ASSERT(dabuf->data == dabuf->bps[0]->b_addr);
                xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
                return;
        }
        dabuf->dirty = 1;
        ASSERT(first <= last);
        for (i = off = 0; i < dabuf->nbuf; i++, off += BBTOB(bp->b_length)) {
                bp = dabuf->bps[i];
                f = off;
                l = f + BBTOB(bp->b_length) - 1;
                if (f < first)
                        f = first;
                if (l > last)
                        l = last;
                if (f <= l)
                        xfs_trans_log_buf(tp, bp, f - off, l - off);
                /*
                 * B_DONE is set by xfs_trans_log_buf.
                 * If we don't set it on a new buffer (get, not read)
                 * then if we don't put anything in the buffer it won't
                 * be set, and at commit it is released into the cache,
                 * and then a read will fail.
                 */
                else if (!(XFS_BUF_ISDONE(bp)))
                        XFS_BUF_DONE(bp);
        }
        ASSERT(last < off);
}
/*
 * Release dabuf from a transaction.
 * Have to free up the dabuf before the buffers are released,
 * since the synchronization on the dabuf is really the lock on the buffer.
 */
void
xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
{
        xfs_buf_t       *bp;
        xfs_buf_t       **bplist;
        int             i;
        int             nbuf;

        ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
        if ((nbuf = dabuf->nbuf) == 1) {
                bplist = &bp;
                bp = dabuf->bps[0];
        } else {
                bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
                memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
        }
        xfs_da_buf_done(dabuf);
        for (i = 0; i < nbuf; i++)
                xfs_trans_brelse(tp, bplist[i]);
        if (bplist != &bp)
                kmem_free(bplist);
}
/*
 * Invalidate dabuf from a transaction.
 */
void
xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
{
        xfs_buf_t       *bp;
        xfs_buf_t       **bplist;
        int             i;
        int             nbuf;

        ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
        if ((nbuf = dabuf->nbuf) == 1) {
                bplist = &bp;
                bp = dabuf->bps[0];
        } else {
                bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
                memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
        }
        xfs_da_buf_done(dabuf);
        for (i = 0; i < nbuf; i++)
                xfs_trans_binval(tp, bplist[i]);
        if (bplist != &bp)
                kmem_free(bplist);
}
/*
 * Get the first daddr from a dabuf.
 */
xfs_daddr_t
xfs_da_blkno(xfs_dabuf_t *dabuf)
{
        ASSERT(dabuf->nbuf);
        ASSERT(dabuf->data);
        return XFS_BUF_ADDR(dabuf->bps[0]);
}