/*
 * btree.c - NILFS B-tree.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pagevec.h>
#include "nilfs.h"
#include "page.h"
#include "btnode.h"
#include "btree.h"
#include "alloc.h"
#include "dat.h"

static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
{
	struct nilfs_btree_path *path;
	int level = NILFS_BTREE_LEVEL_DATA;

	path = kmem_cache_alloc(nilfs_btree_path_cache, GFP_NOFS);
	if (path == NULL)
		goto out;

	for (; level < NILFS_BTREE_LEVEL_MAX; level++) {
		path[level].bp_bh = NULL;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index = 0;
		path[level].bp_oldreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
		path[level].bp_newreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
		path[level].bp_op = NULL;
	}

out:
	return path;
}

static void nilfs_btree_free_path(struct nilfs_btree_path *path)
{
	int level = NILFS_BTREE_LEVEL_DATA;

	for (; level < NILFS_BTREE_LEVEL_MAX; level++)
		brelse(path[level].bp_bh);

	kmem_cache_free(nilfs_btree_path_cache, path);
}

/*
 * B-tree node operations
 */
static int nilfs_btree_get_block(const struct nilfs_btree *btree, __u64 ptr,
				 struct buffer_head **bhp)
{
	struct address_space *btnc =
		&NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
	int err;

	err = nilfs_btnode_submit_block(btnc, ptr, 0, bhp);
	if (err)
		return err == -EEXIST ? 0 : err;

	wait_on_buffer(*bhp);
	if (!buffer_uptodate(*bhp)) {
		brelse(*bhp);
		return -EIO;
	}
	return 0;
}

static int nilfs_btree_get_new_block(const struct nilfs_btree *btree,
				     __u64 ptr, struct buffer_head **bhp)
{
	struct address_space *btnc =
		&NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
	struct buffer_head *bh;

	bh = nilfs_btnode_create_block(btnc, ptr);
	if (!bh)
		return -ENOMEM;

	set_buffer_nilfs_volatile(bh);
	*bhp = bh;
	return 0;
}

static inline int
nilfs_btree_node_get_flags(const struct nilfs_btree_node *node)
{
	return node->bn_flags;
}

static inline void
nilfs_btree_node_set_flags(struct nilfs_btree_node *node, int flags)
{
	node->bn_flags = flags;
}

static inline int nilfs_btree_node_root(const struct nilfs_btree_node *node)
{
	return nilfs_btree_node_get_flags(node) & NILFS_BTREE_NODE_ROOT;
}

static inline int
nilfs_btree_node_get_level(const struct nilfs_btree_node *node)
{
	return node->bn_level;
}

static inline void
nilfs_btree_node_set_level(struct nilfs_btree_node *node, int level)
{
	node->bn_level = level;
}

static inline int
nilfs_btree_node_get_nchildren(const struct nilfs_btree_node *node)
{
	return le16_to_cpu(node->bn_nchildren);
}

static inline void
nilfs_btree_node_set_nchildren(struct nilfs_btree_node *node, int nchildren)
{
	node->bn_nchildren = cpu_to_le16(nchildren);
}

static inline int nilfs_btree_node_size(const struct nilfs_btree *btree)
{
	return 1 << btree->bt_bmap.b_inode->i_blkbits;
}

static inline int
nilfs_btree_node_nchildren_min(const struct nilfs_btree_node *node,
			       const struct nilfs_btree *btree)
{
	return nilfs_btree_node_root(node) ?
		NILFS_BTREE_ROOT_NCHILDREN_MIN :
		NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree));
}

static inline int
nilfs_btree_node_nchildren_max(const struct nilfs_btree_node *node,
			       const struct nilfs_btree *btree)
{
	return nilfs_btree_node_root(node) ?
		NILFS_BTREE_ROOT_NCHILDREN_MAX :
		NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree));
}

static inline __le64 *
nilfs_btree_node_dkeys(const struct nilfs_btree_node *node)
{
	return (__le64 *)((char *)(node + 1) +
			  (nilfs_btree_node_root(node) ?
			   0 : NILFS_BTREE_NODE_EXTRA_PAD_SIZE));
}

static inline __le64 *
nilfs_btree_node_dptrs(const struct nilfs_btree_node *node,
		       const struct nilfs_btree *btree)
{
	return (__le64 *)(nilfs_btree_node_dkeys(node) +
			  nilfs_btree_node_nchildren_max(node, btree));
}

static inline __u64
nilfs_btree_node_get_key(const struct nilfs_btree_node *node, int index)
{
	return nilfs_bmap_dkey_to_key(*(nilfs_btree_node_dkeys(node) + index));
}

static inline void
nilfs_btree_node_set_key(struct nilfs_btree_node *node, int index, __u64 key)
{
	*(nilfs_btree_node_dkeys(node) + index) = nilfs_bmap_key_to_dkey(key);
}

static inline __u64
nilfs_btree_node_get_ptr(const struct nilfs_btree *btree,
			 const struct nilfs_btree_node *node, int index)
{
	return nilfs_bmap_dptr_to_ptr(*(nilfs_btree_node_dptrs(node, btree) +
					index));
}

static inline void
nilfs_btree_node_set_ptr(struct nilfs_btree *btree,
			 struct nilfs_btree_node *node, int index, __u64 ptr)
{
	*(nilfs_btree_node_dptrs(node, btree) + index) =
		nilfs_bmap_ptr_to_dptr(ptr);
}

static void nilfs_btree_node_init(struct nilfs_btree *btree,
				  struct nilfs_btree_node *node,
				  int flags, int level, int nchildren,
				  const __u64 *keys, const __u64 *ptrs)
{
	__le64 *dkeys;
	__le64 *dptrs;
	int i;

	nilfs_btree_node_set_flags(node, flags);
	nilfs_btree_node_set_level(node, level);
	nilfs_btree_node_set_nchildren(node, nchildren);

	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, btree);
	for (i = 0; i < nchildren; i++) {
		dkeys[i] = nilfs_bmap_key_to_dkey(keys[i]);
		dptrs[i] = nilfs_bmap_ptr_to_dptr(ptrs[i]);
	}
}

/* Assume the buffer heads corresponding to left and right are locked. */
static void nilfs_btree_node_move_left(struct nilfs_btree *btree,
				       struct nilfs_btree_node *left,
				       struct nilfs_btree_node *right,
				       int n)
{
	__le64 *ldkeys, *rdkeys;
	__le64 *ldptrs, *rdptrs;
	int lnchildren, rnchildren;

	ldkeys = nilfs_btree_node_dkeys(left);
	ldptrs = nilfs_btree_node_dptrs(left, btree);
	lnchildren = nilfs_btree_node_get_nchildren(left);

	rdkeys = nilfs_btree_node_dkeys(right);
	rdptrs = nilfs_btree_node_dptrs(right, btree);
	rnchildren = nilfs_btree_node_get_nchildren(right);

	memcpy(ldkeys + lnchildren, rdkeys, n * sizeof(*rdkeys));
	memcpy(ldptrs + lnchildren, rdptrs, n * sizeof(*rdptrs));
	memmove(rdkeys, rdkeys + n, (rnchildren - n) * sizeof(*rdkeys));
	memmove(rdptrs, rdptrs + n, (rnchildren - n) * sizeof(*rdptrs));

	lnchildren += n;
	rnchildren -= n;
	nilfs_btree_node_set_nchildren(left, lnchildren);
	nilfs_btree_node_set_nchildren(right, rnchildren);
}

/* Assume that the buffer heads corresponding to left and right are locked. */
static void nilfs_btree_node_move_right(struct nilfs_btree *btree,
					struct nilfs_btree_node *left,
					struct nilfs_btree_node *right,
					int n)
{
	__le64 *ldkeys, *rdkeys;
	__le64 *ldptrs, *rdptrs;
	int lnchildren, rnchildren;

	ldkeys = nilfs_btree_node_dkeys(left);
	ldptrs = nilfs_btree_node_dptrs(left, btree);
	lnchildren = nilfs_btree_node_get_nchildren(left);

	rdkeys = nilfs_btree_node_dkeys(right);
	rdptrs = nilfs_btree_node_dptrs(right, btree);
	rnchildren = nilfs_btree_node_get_nchildren(right);

	memmove(rdkeys + n, rdkeys, rnchildren * sizeof(*rdkeys));
	memmove(rdptrs + n, rdptrs, rnchildren * sizeof(*rdptrs));
	memcpy(rdkeys, ldkeys + lnchildren - n, n * sizeof(*rdkeys));
	memcpy(rdptrs, ldptrs + lnchildren - n, n * sizeof(*rdptrs));

	lnchildren -= n;
	rnchildren += n;
	nilfs_btree_node_set_nchildren(left, lnchildren);
	nilfs_btree_node_set_nchildren(right, rnchildren);
}

/* Assume that the buffer head corresponding to node is locked. */
static void nilfs_btree_node_insert(struct nilfs_btree *btree,
				    struct nilfs_btree_node *node,
				    __u64 key, __u64 ptr, int index)
{
	__le64 *dkeys;
	__le64 *dptrs;
	int nchildren;

	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, btree);
	nchildren = nilfs_btree_node_get_nchildren(node);
	if (index < nchildren) {
		memmove(dkeys + index + 1, dkeys + index,
			(nchildren - index) * sizeof(*dkeys));
		memmove(dptrs + index + 1, dptrs + index,
			(nchildren - index) * sizeof(*dptrs));
	}
	dkeys[index] = nilfs_bmap_key_to_dkey(key);
	dptrs[index] = nilfs_bmap_ptr_to_dptr(ptr);
	nchildren++;
	nilfs_btree_node_set_nchildren(node, nchildren);
}

/* Assume that the buffer head corresponding to node is locked. */
static void nilfs_btree_node_delete(struct nilfs_btree *btree,
				    struct nilfs_btree_node *node,
				    __u64 *keyp, __u64 *ptrp, int index)
{
	__u64 key;
	__u64 ptr;
	__le64 *dkeys;
	__le64 *dptrs;
	int nchildren;

	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, btree);
	key = nilfs_bmap_dkey_to_key(dkeys[index]);
	ptr = nilfs_bmap_dptr_to_ptr(dptrs[index]);
	nchildren = nilfs_btree_node_get_nchildren(node);
	if (keyp != NULL)
		*keyp = key;
	if (ptrp != NULL)
		*ptrp = ptr;

	if (index < nchildren - 1) {
		memmove(dkeys + index, dkeys + index + 1,
			(nchildren - index - 1) * sizeof(*dkeys));
		memmove(dptrs + index, dptrs + index + 1,
			(nchildren - index - 1) * sizeof(*dptrs));
	}
	nchildren--;
	nilfs_btree_node_set_nchildren(node, nchildren);
}
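
/*
 * nilfs_btree_node_lookup - binary search for a key within a node.
 * Returns 1 if the key was found, 0 otherwise.  The chosen slot is stored
 * in *indexp: for intermediate nodes it is the child covering the key
 * range; for the lowest level it is the key's position (or the position
 * at which it would be inserted).
 */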
static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node,
				   __u64 key, int *indexp)
{
	__u64 nkey;
	int index, low, high, s;

	/* binary search */
	low = 0;
	high = nilfs_btree_node_get_nchildren(node) - 1;
	index = 0;
	s = 0;
	while (low <= high) {
		index = (low + high) / 2;
		nkey = nilfs_btree_node_get_key(node, index);
		if (nkey == key) {
			s = 0;
			goto out;
		} else if (nkey < key) {
			low = index + 1;
			s = -1;
		} else {
			high = index - 1;
			s = 1;
		}
	}

	/* adjust index */
	if (nilfs_btree_node_get_level(node) > NILFS_BTREE_LEVEL_NODE_MIN) {
		if (s > 0 && index > 0)
			index--;
	} else if (s < 0)
		index++;

out:
	*indexp = index;
	return s == 0;
}

static inline struct nilfs_btree_node *
nilfs_btree_get_root(const struct nilfs_btree *btree)
{
	return (struct nilfs_btree_node *)btree->bt_bmap.b_u.u_data;
}

static inline struct nilfs_btree_node *
nilfs_btree_get_nonroot_node(const struct nilfs_btree_path *path, int level)
{
	return (struct nilfs_btree_node *)path[level].bp_bh->b_data;
}

static inline struct nilfs_btree_node *
nilfs_btree_get_sib_node(const struct nilfs_btree_path *path, int level)
{
	return (struct nilfs_btree_node *)path[level].bp_sib_bh->b_data;
}

static inline int nilfs_btree_height(const struct nilfs_btree *btree)
{
	return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1;
}

static inline struct nilfs_btree_node *
nilfs_btree_get_node(const struct nilfs_btree *btree,
		     const struct nilfs_btree_path *path,
		     int level)
{
	return (level == nilfs_btree_height(btree) - 1) ?
		nilfs_btree_get_root(btree) :
		nilfs_btree_get_nonroot_node(path, level);
}

static inline int
nilfs_btree_bad_node(struct nilfs_btree_node *node, int level)
{
	if (unlikely(nilfs_btree_node_get_level(node) != level)) {
		dump_stack();
		printk(KERN_CRIT "NILFS: btree level mismatch: %d != %d\n",
		       nilfs_btree_node_get_level(node), level);
		return 1;
	}
	return 0;
}
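
/*
 * nilfs_btree_do_lookup - descend from the root down to minlevel looking up
 * key, recording the visited buffer heads and slot indices in path.
 * Returns 0 and the resulting pointer through ptrp, or -ENOENT if the key
 * is not present; the recorded path is still usable for a later insertion.
 */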
static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
				 struct nilfs_btree_path *path,
				 __u64 key, __u64 *ptrp, int minlevel)
{
	struct nilfs_btree_node *node;
	__u64 ptr;
	int level, index, found, ret;

	node = nilfs_btree_get_root(btree);
	level = nilfs_btree_node_get_level(node);
	if (level < minlevel || nilfs_btree_node_get_nchildren(node) <= 0)
		return -ENOENT;

	found = nilfs_btree_node_lookup(node, key, &index);
	ptr = nilfs_btree_node_get_ptr(btree, node, index);
	path[level].bp_bh = NULL;
	path[level].bp_index = index;

	for (level--; level >= minlevel; level--) {
		ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
		if (ret < 0)
			return ret;
		node = nilfs_btree_get_nonroot_node(path, level);
		if (nilfs_btree_bad_node(node, level))
			return -EINVAL;
		if (!found)
			found = nilfs_btree_node_lookup(node, key, &index);
		else
			index = 0;
		if (index < nilfs_btree_node_nchildren_max(node, btree))
			ptr = nilfs_btree_node_get_ptr(btree, node, index);
		else {
			WARN_ON(found || level != NILFS_BTREE_LEVEL_NODE_MIN);
			/* insert */
			ptr = NILFS_BMAP_INVALID_PTR;
		}
		path[level].bp_index = index;
	}
	if (!found)
		return -ENOENT;

	if (ptrp != NULL)
		*ptrp = ptr;

	return 0;
}
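
/*
 * nilfs_btree_do_lookup_last - follow the rightmost pointer at every level
 * to locate the largest key currently stored, returning that key and its
 * pointer through keyp and ptrp.
 */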
static int nilfs_btree_do_lookup_last(const struct nilfs_btree *btree,
				      struct nilfs_btree_path *path,
				      __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;
	__u64 ptr;
	int index, level, ret;

	node = nilfs_btree_get_root(btree);
	index = nilfs_btree_node_get_nchildren(node) - 1;
	if (index < 0)
		return -ENOENT;
	level = nilfs_btree_node_get_level(node);
	ptr = nilfs_btree_node_get_ptr(btree, node, index);
	path[level].bp_bh = NULL;
	path[level].bp_index = index;

	for (level--; level > 0; level--) {
		ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
		if (ret < 0)
			return ret;
		node = nilfs_btree_get_nonroot_node(path, level);
		if (nilfs_btree_bad_node(node, level))
			return -EINVAL;
		index = nilfs_btree_node_get_nchildren(node) - 1;
		ptr = nilfs_btree_node_get_ptr(btree, node, index);
		path[level].bp_index = index;
	}

	if (keyp != NULL)
		*keyp = nilfs_btree_node_get_key(node, index);
	if (ptrp != NULL)
		*ptrp = ptr;

	return 0;
}

static int nilfs_btree_lookup(const struct nilfs_bmap *bmap,
			      __u64 key, int level, __u64 *ptrp)
{
	struct nilfs_btree *btree;
	struct nilfs_btree_path *path;
	__u64 ptr;
	int ret;

	btree = (struct nilfs_btree *)bmap;
	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level);

	if (ptrp != NULL)
		*ptrp = ptr;

	nilfs_btree_free_path(path);

	return ret;
}
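
/*
 * nilfs_btree_lookup_contig - look up the block mapped at "key" and count
 * how many of the following keys translate to consecutive disk blocks,
 * walking into right sibling nodes as needed.  Returns the number of
 * contiguous blocks found (at most "maxblocks") or a negative error code;
 * the first block address is returned through ptrp.
 */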
static int nilfs_btree_lookup_contig(const struct nilfs_bmap *bmap,
				     __u64 key, __u64 *ptrp, unsigned maxblocks)
{
	struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
	struct nilfs_btree_path *path;
	struct nilfs_btree_node *node;
	struct inode *dat = NULL;
	__u64 ptr, ptr2;
	sector_t blocknr;
	int level = NILFS_BTREE_LEVEL_NODE_MIN;
	int ret, cnt, index, maxlevel;

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level);
	if (ret < 0)
		goto out;

	if (NILFS_BMAP_USE_VBN(bmap)) {
		dat = nilfs_bmap_get_dat(bmap);
		ret = nilfs_dat_translate(dat, ptr, &blocknr);
		if (ret < 0)
			goto out;
		ptr = blocknr;
	}
	cnt = 1;
	if (cnt == maxblocks)
		goto end;

	maxlevel = nilfs_btree_height(btree) - 1;
	node = nilfs_btree_get_node(btree, path, level);
	index = path[level].bp_index + 1;
	for (;;) {
		while (index < nilfs_btree_node_get_nchildren(node)) {
			if (nilfs_btree_node_get_key(node, index) !=
			    key + cnt)
				goto end;
			ptr2 = nilfs_btree_node_get_ptr(btree, node, index);
			if (dat) {
				ret = nilfs_dat_translate(dat, ptr2, &blocknr);
				if (ret < 0)
					goto out;
				ptr2 = blocknr;
			}
			if (ptr2 != ptr + cnt || ++cnt == maxblocks)
				goto end;
			index++;
			continue;
		}
		if (level == maxlevel)
			break;

		/* look-up right sibling node */
		node = nilfs_btree_get_node(btree, path, level + 1);
		index = path[level + 1].bp_index + 1;
		if (index >= nilfs_btree_node_get_nchildren(node) ||
		    nilfs_btree_node_get_key(node, index) != key + cnt)
			break;
		ptr2 = nilfs_btree_node_get_ptr(btree, node, index);
		path[level + 1].bp_index = index;

		brelse(path[level].bp_bh);
		path[level].bp_bh = NULL;
		ret = nilfs_btree_get_block(btree, ptr2, &path[level].bp_bh);
		if (ret < 0)
			goto out;
		node = nilfs_btree_get_nonroot_node(path, level);
		index = 0;
		path[level].bp_index = index;
	}
end:
	*ptrp = ptr;
	ret = cnt;
out:
	nilfs_btree_free_path(path);
	return ret;
}
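
/*
 * nilfs_btree_promote_key - after the leftmost entry of a node has changed,
 * rewrite the separator key recorded for it in the ancestor nodes,
 * continuing upward (and finally into the root) as long as the updated
 * entry is itself the first one of its parent.
 */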
static void nilfs_btree_promote_key(struct nilfs_btree *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 key)
{
	if (level < nilfs_btree_height(btree) - 1) {
		do {
			nilfs_btree_node_set_key(
				nilfs_btree_get_nonroot_node(path, level),
				path[level].bp_index, key);
			if (!buffer_dirty(path[level].bp_bh))
				nilfs_btnode_mark_dirty(path[level].bp_bh);
		} while ((path[level].bp_index == 0) &&
			 (++level < nilfs_btree_height(btree) - 1));
	}

	/* root */
	if (level == nilfs_btree_height(btree) - 1) {
		nilfs_btree_node_set_key(nilfs_btree_get_root(btree),
					 path[level].bp_index, key);
	}
}

static void nilfs_btree_do_insert(struct nilfs_btree *btree,
				  struct nilfs_btree_path *path,
				  int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;

	if (level < nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_nonroot_node(path, level);
		nilfs_btree_node_insert(btree, node, *keyp, *ptrp,
					path[level].bp_index);
		if (!buffer_dirty(path[level].bp_bh))
			nilfs_btnode_mark_dirty(path[level].bp_bh);

		if (path[level].bp_index == 0)
			nilfs_btree_promote_key(btree, path, level + 1,
						nilfs_btree_node_get_key(node,
									 0));
	} else {
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_insert(btree, node, *keyp, *ptrp,
					path[level].bp_index);
	}
}
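
/*
 * Insertion rebalancing helpers.  When the target node is full,
 * nilfs_btree_carry_left() and nilfs_btree_carry_right() shift entries into
 * a sibling that still has room, nilfs_btree_split() divides the node in
 * two, and nilfs_btree_grow() moves the root's entries into a new child to
 * add a level to the tree.  Each of them ends by inserting the new entry at
 * the adjusted position.
 */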
static void nilfs_btree_carry_left(struct nilfs_btree *btree,
				   struct nilfs_btree_path *path,
				   int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int nchildren, lnchildren, n, move;

	node = nilfs_btree_get_nonroot_node(path, level);
	left = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	lnchildren = nilfs_btree_node_get_nchildren(left);
	move = 0;

	n = (nchildren + lnchildren + 1) / 2 - lnchildren;
	if (n > path[level].bp_index) {
		/* move insert point */
		n--;
		move = 1;
	}

	nilfs_btree_node_move_left(btree, left, node, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);

	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(node, 0));

	if (move) {
		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index += lnchildren;
		path[level + 1].bp_index--;
	} else {
		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
		path[level].bp_index -= n;
	}

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
}

static void nilfs_btree_carry_right(struct nilfs_btree *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int nchildren, rnchildren, n, move;

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	rnchildren = nilfs_btree_node_get_nchildren(right);
	move = 0;

	n = (nchildren + rnchildren + 1) / 2 - rnchildren;
	if (n > nchildren - path[level].bp_index) {
		/* move insert point */
		n--;
		move = 1;
	}

	nilfs_btree_node_move_right(btree, node, right, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);

	path[level + 1].bp_index++;
	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(right, 0));
	path[level + 1].bp_index--;

	if (move) {
		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index -= nilfs_btree_node_get_nchildren(node);
		path[level + 1].bp_index++;
	} else {
		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
	}

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
}

static void nilfs_btree_split(struct nilfs_btree *btree,
			      struct nilfs_btree_path *path,
			      int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	__u64 newkey;
	__u64 newptr;
	int nchildren, n, move;

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	move = 0;

	n = (nchildren + 1) / 2;
	if (n > nchildren - path[level].bp_index) {
		n--;
		move = 1;
	}

	nilfs_btree_node_move_right(btree, node, right, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);

	newkey = nilfs_btree_node_get_key(right, 0);
	newptr = path[level].bp_newreq.bpr_ptr;

	if (move) {
		path[level].bp_index -= nilfs_btree_node_get_nchildren(node);
		nilfs_btree_node_insert(btree, right, *keyp, *ptrp,
					path[level].bp_index);

		*keyp = nilfs_btree_node_get_key(right, 0);
		*ptrp = path[level].bp_newreq.bpr_ptr;

		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
	} else {
		nilfs_btree_do_insert(btree, path, level, keyp, ptrp);

		*keyp = nilfs_btree_node_get_key(right, 0);
		*ptrp = path[level].bp_newreq.bpr_ptr;

		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
	}

	path[level + 1].bp_index++;
}

static void nilfs_btree_grow(struct nilfs_btree *btree,
			     struct nilfs_btree_path *path,
			     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *root, *child;
	int n;

	root = nilfs_btree_get_root(btree);
	child = nilfs_btree_get_sib_node(path, level);

	n = nilfs_btree_node_get_nchildren(root);

	nilfs_btree_node_move_right(btree, root, child, n);
	nilfs_btree_node_set_level(root, level + 1);

	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);

	path[level].bp_bh = path[level].bp_sib_bh;
	path[level].bp_sib_bh = NULL;

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);

	*keyp = nilfs_btree_node_get_key(child, 0);
	*ptrp = path[level].bp_newreq.bpr_ptr;
}

static __u64 nilfs_btree_find_near(const struct nilfs_btree *btree,
				   const struct nilfs_btree_path *path)
{
	struct nilfs_btree_node *node;
	int level;

	if (path == NULL)
		return NILFS_BMAP_INVALID_PTR;

	/* left sibling */
	level = NILFS_BTREE_LEVEL_NODE_MIN;
	if (path[level].bp_index > 0) {
		node = nilfs_btree_get_node(btree, path, level);
		return nilfs_btree_node_get_ptr(btree, node,
						path[level].bp_index - 1);
	}

	/* parent */
	level = NILFS_BTREE_LEVEL_NODE_MIN + 1;
	if (level <= nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_node(btree, path, level);
		return nilfs_btree_node_get_ptr(btree, node,
						path[level].bp_index);
	}

	return NILFS_BMAP_INVALID_PTR;
}

static __u64 nilfs_btree_find_target_v(const struct nilfs_btree *btree,
				       const struct nilfs_btree_path *path,
				       __u64 key)
{
	__u64 ptr;

	ptr = nilfs_bmap_find_target_seq(&btree->bt_bmap, key);
	if (ptr != NILFS_BMAP_INVALID_PTR)
		/* sequential access */
		return ptr;
	else {
		ptr = nilfs_btree_find_near(btree, path);
		if (ptr != NILFS_BMAP_INVALID_PTR)
			/* near */
			return ptr;
	}
	/* block group */
	return nilfs_bmap_find_target_in_group(&btree->bt_bmap);
}

static void nilfs_btree_set_target_v(struct nilfs_btree *btree, __u64 key,
				     __u64 ptr)
{
	btree->bt_bmap.b_last_allocated_key = key;
	btree->bt_bmap.b_last_allocated_ptr = ptr;
}
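
/*
 * nilfs_btree_prepare_insert - decide how the insertion of (key, ptr) will
 * be carried out.  Walking up from the bottom level, it picks the cheapest
 * operation for each level (insert in place, carry into a sibling, split,
 * or grow the tree), reserves any new block pointers, and records the
 * chosen operation in path[level].bp_op for nilfs_btree_commit_insert().
 */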
static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
				      struct nilfs_btree_path *path,
				      int *levelp, __u64 key, __u64 ptr,
				      struct nilfs_bmap_stats *stats)
{
	struct buffer_head *bh;
	struct nilfs_btree_node *node, *parent, *sib;
	__u64 sibptr;
	int pindex, level, ret;
	struct inode *dat = NULL;

	stats->bs_nblocks = 0;
	level = NILFS_BTREE_LEVEL_DATA;

	/* allocate a new ptr for data block */
	if (NILFS_BMAP_USE_VBN(&btree->bt_bmap)) {
		path[level].bp_newreq.bpr_ptr =
			nilfs_btree_find_target_v(btree, path, key);
		dat = nilfs_bmap_get_dat(&btree->bt_bmap);
	}

	ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
					   &path[level].bp_newreq, dat);
	if (ret < 0)
		goto err_out_data;

	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < nilfs_btree_height(btree) - 1;
	     level++) {
		node = nilfs_btree_get_nonroot_node(path, level);
		if (nilfs_btree_node_get_nchildren(node) <
		    nilfs_btree_node_nchildren_max(node, btree)) {
			path[level].bp_op = nilfs_btree_do_insert;
			stats->bs_nblocks++;
			goto out;
		}

		parent = nilfs_btree_get_node(btree, path, level + 1);
		pindex = path[level + 1].bp_index;

		/* left sibling */
		if (pindex > 0) {
			sibptr = nilfs_btree_node_get_ptr(btree, parent,
							  pindex - 1);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_child_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(sib) <
			    nilfs_btree_node_nchildren_max(sib, btree)) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_carry_left;
				stats->bs_nblocks++;
				goto out;
			} else
				brelse(bh);
		}

		/* right sibling */
		if (pindex <
		    nilfs_btree_node_get_nchildren(parent) - 1) {
			sibptr = nilfs_btree_node_get_ptr(btree, parent,
							  pindex + 1);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_child_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(sib) <
			    nilfs_btree_node_nchildren_max(sib, btree)) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_carry_right;
				stats->bs_nblocks++;
				goto out;
			} else
				brelse(bh);
		}

		/* split */
		path[level].bp_newreq.bpr_ptr =
			path[level - 1].bp_newreq.bpr_ptr + 1;
		ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
						   &path[level].bp_newreq, dat);
		if (ret < 0)
			goto err_out_child_node;
		ret = nilfs_btree_get_new_block(btree,
						path[level].bp_newreq.bpr_ptr,
						&bh);
		if (ret < 0)
			goto err_out_curr_node;

		stats->bs_nblocks++;

		nilfs_btree_node_init(btree,
				      (struct nilfs_btree_node *)bh->b_data,
				      0, level, 0, NULL, NULL);
		path[level].bp_sib_bh = bh;
		path[level].bp_op = nilfs_btree_split;
	}

	/* root */
	node = nilfs_btree_get_root(btree);
	if (nilfs_btree_node_get_nchildren(node) <
	    nilfs_btree_node_nchildren_max(node, btree)) {
		path[level].bp_op = nilfs_btree_do_insert;
		stats->bs_nblocks++;
		goto out;
	}

	/* grow */
	path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
	ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
					   &path[level].bp_newreq, dat);
	if (ret < 0)
		goto err_out_child_node;
	ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr,
					&bh);
	if (ret < 0)
		goto err_out_curr_node;

	nilfs_btree_node_init(btree, (struct nilfs_btree_node *)bh->b_data,
			      0, level, 0, NULL, NULL);
	path[level].bp_sib_bh = bh;
	path[level].bp_op = nilfs_btree_grow;

	level++;
	path[level].bp_op = nilfs_btree_do_insert;

	/* a newly-created node block and a data block are added */
	stats->bs_nblocks += 2;

	/* success */
out:
	*levelp = level;
	return ret;

	/* error */
err_out_curr_node:
	nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq,
				   dat);
err_out_child_node:
	for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
		nilfs_btnode_delete(path[level].bp_sib_bh);
		nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap,
					   &path[level].bp_newreq, dat);
	}

	nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq,
				   dat);
err_out_data:
	*levelp = level;
	stats->bs_nblocks = 0;
	return ret;
}

static void nilfs_btree_commit_insert(struct nilfs_btree *btree,
				      struct nilfs_btree_path *path,
				      int maxlevel, __u64 key, __u64 ptr)
{
	struct inode *dat = NULL;
	int level;

	set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
	ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr;
	if (NILFS_BMAP_USE_VBN(&btree->bt_bmap)) {
		nilfs_btree_set_target_v(btree, key, ptr);
		dat = nilfs_bmap_get_dat(&btree->bt_bmap);
	}

	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
		nilfs_bmap_commit_alloc_ptr(&btree->bt_bmap,
					    &path[level - 1].bp_newreq, dat);
		path[level].bp_op(btree, path, level, &key, &ptr);
	}

	if (!nilfs_bmap_dirty(&btree->bt_bmap))
		nilfs_bmap_set_dirty(&btree->bt_bmap);
}

static int nilfs_btree_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
	struct nilfs_btree *btree;
	struct nilfs_btree_path *path;
	struct nilfs_bmap_stats stats;
	int level, ret;

	btree = (struct nilfs_btree *)bmap;
	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, NULL,
				    NILFS_BTREE_LEVEL_NODE_MIN);
	if (ret != -ENOENT) {
		if (ret == 0)
			ret = -EEXIST;
		goto out;
	}

	ret = nilfs_btree_prepare_insert(btree, path, &level, key, ptr, &stats);
	if (ret < 0)
		goto out;
	nilfs_btree_commit_insert(btree, path, level, key, ptr);
	nilfs_bmap_add_blocks(bmap, stats.bs_nblocks);

out:
	nilfs_btree_free_path(path);
	return ret;
}

static void nilfs_btree_do_delete(struct nilfs_btree *btree,
				  struct nilfs_btree_path *path,
				  int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;

	if (level < nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_nonroot_node(path, level);
		nilfs_btree_node_delete(btree, node, keyp, ptrp,
					path[level].bp_index);
		if (!buffer_dirty(path[level].bp_bh))
			nilfs_btnode_mark_dirty(path[level].bp_bh);
		if (path[level].bp_index == 0)
			nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(node, 0));
	} else {
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_delete(btree, node, keyp, ptrp,
					path[level].bp_index);
	}
}
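
/*
 * Deletion rebalancing helpers.  nilfs_btree_borrow_left()/_right() refill
 * an underflowed node from a sibling, nilfs_btree_concat_left()/_right()
 * merge it into a sibling and release its block, and nilfs_btree_shrink()
 * pulls the entries of the root's only child back into the root, reducing
 * the tree height.  Each starts by removing the target entry through
 * nilfs_btree_do_delete().
 */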
static void nilfs_btree_borrow_left(struct nilfs_btree *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int nchildren, lnchildren, n;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	node = nilfs_btree_get_nonroot_node(path, level);
	left = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	lnchildren = nilfs_btree_node_get_nchildren(left);

	n = (nchildren + lnchildren) / 2 - nchildren;

	nilfs_btree_node_move_right(btree, left, node, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);

	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(node, 0));

	brelse(path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
	path[level].bp_index += n;
}

static void nilfs_btree_borrow_right(struct nilfs_btree *btree,
				     struct nilfs_btree_path *path,
				     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int nchildren, rnchildren, n;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	rnchildren = nilfs_btree_node_get_nchildren(right);

	n = (nchildren + rnchildren) / 2 - nchildren;

	nilfs_btree_node_move_left(btree, node, right, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);

	path[level + 1].bp_index++;
	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(right, 0));
	path[level + 1].bp_index--;

	brelse(path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
}

static void nilfs_btree_concat_left(struct nilfs_btree *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int n;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	node = nilfs_btree_get_nonroot_node(path, level);
	left = nilfs_btree_get_sib_node(path, level);

	n = nilfs_btree_node_get_nchildren(node);

	nilfs_btree_node_move_left(btree, left, node, n);

	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);

	nilfs_btnode_delete(path[level].bp_bh);
	path[level].bp_bh = path[level].bp_sib_bh;
	path[level].bp_sib_bh = NULL;
	path[level].bp_index += nilfs_btree_node_get_nchildren(left);
}

static void nilfs_btree_concat_right(struct nilfs_btree *btree,
				     struct nilfs_btree_path *path,
				     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int n;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);

	n = nilfs_btree_node_get_nchildren(right);

	nilfs_btree_node_move_left(btree, node, right, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);

	nilfs_btnode_delete(path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
	path[level + 1].bp_index++;
}

static void nilfs_btree_shrink(struct nilfs_btree *btree,
			       struct nilfs_btree_path *path,
			       int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *root, *child;
	int n;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	root = nilfs_btree_get_root(btree);
	child = nilfs_btree_get_nonroot_node(path, level);

	nilfs_btree_node_delete(btree, root, NULL, NULL, 0);
	nilfs_btree_node_set_level(root, level);
	n = nilfs_btree_node_get_nchildren(child);
	nilfs_btree_node_move_left(btree, root, child, n);

	nilfs_btnode_delete(path[level].bp_bh);
	path[level].bp_bh = NULL;
}
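
/*
 * nilfs_btree_prepare_delete - plan the removal of the entry recorded in
 * path.  For each level that would underflow it chooses borrow, concatenate,
 * or shrink depending on the siblings, prepares the disposal of the old
 * block pointers, and stores the chosen operation in path[level].bp_op for
 * nilfs_btree_commit_delete().
 */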
static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
				      struct nilfs_btree_path *path,
				      int *levelp,
				      struct nilfs_bmap_stats *stats,
				      struct inode *dat)
{
	struct buffer_head *bh;
	struct nilfs_btree_node *node, *parent, *sib;
	__u64 sibptr;
	int pindex, level, ret;

	ret = 0;
	stats->bs_nblocks = 0;
	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < nilfs_btree_height(btree) - 1;
	     level++) {
		node = nilfs_btree_get_nonroot_node(path, level);
		path[level].bp_oldreq.bpr_ptr =
			nilfs_btree_node_get_ptr(btree, node,
						 path[level].bp_index);
		ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
						 &path[level].bp_oldreq, dat);
		if (ret < 0)
			goto err_out_child_node;

		if (nilfs_btree_node_get_nchildren(node) >
		    nilfs_btree_node_nchildren_min(node, btree)) {
			path[level].bp_op = nilfs_btree_do_delete;
			stats->bs_nblocks++;
			goto out;
		}

		parent = nilfs_btree_get_node(btree, path, level + 1);
		pindex = path[level + 1].bp_index;

		if (pindex > 0) {
			/* left sibling */
			sibptr = nilfs_btree_node_get_ptr(btree, parent,
							  pindex - 1);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_curr_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(sib) >
			    nilfs_btree_node_nchildren_min(sib, btree)) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_borrow_left;
				stats->bs_nblocks++;
				goto out;
			} else {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_concat_left;
				stats->bs_nblocks++;
				/* continue; */
			}
		} else if (pindex <
			   nilfs_btree_node_get_nchildren(parent) - 1) {
			/* right sibling */
			sibptr = nilfs_btree_node_get_ptr(btree, parent,
							  pindex + 1);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_curr_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(sib) >
			    nilfs_btree_node_nchildren_min(sib, btree)) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_borrow_right;
				stats->bs_nblocks++;
				goto out;
			} else {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_concat_right;
				stats->bs_nblocks++;
				/* continue; */
			}
		} else {
			/* no siblings */
			/* the only child of the root node */
			WARN_ON(level != nilfs_btree_height(btree) - 2);
			if (nilfs_btree_node_get_nchildren(node) - 1 <=
			    NILFS_BTREE_ROOT_NCHILDREN_MAX) {
				path[level].bp_op = nilfs_btree_shrink;
				stats->bs_nblocks += 2;
			} else {
				path[level].bp_op = nilfs_btree_do_delete;
				stats->bs_nblocks++;
			}

			goto out;
		}
	}

	node = nilfs_btree_get_root(btree);
	path[level].bp_oldreq.bpr_ptr =
		nilfs_btree_node_get_ptr(btree, node, path[level].bp_index);

	ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
					 &path[level].bp_oldreq, dat);
	if (ret < 0)
		goto err_out_child_node;

	/* child of the root node is deleted */
	path[level].bp_op = nilfs_btree_do_delete;
	stats->bs_nblocks++;

	/* success */
out:
	*levelp = level;
	return ret;

	/* error */
err_out_curr_node:
	nilfs_bmap_abort_end_ptr(&btree->bt_bmap, &path[level].bp_oldreq, dat);
err_out_child_node:
	for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) {
		brelse(path[level].bp_sib_bh);
		nilfs_bmap_abort_end_ptr(&btree->bt_bmap,
					 &path[level].bp_oldreq, dat);
	}
	*levelp = level;
	stats->bs_nblocks = 0;
	return ret;
}

static void nilfs_btree_commit_delete(struct nilfs_btree *btree,
				      struct nilfs_btree_path *path,
				      int maxlevel, struct inode *dat)
{
	int level;

	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
		nilfs_bmap_commit_end_ptr(&btree->bt_bmap,
					  &path[level].bp_oldreq, dat);
		path[level].bp_op(btree, path, level, NULL, NULL);
	}

	if (!nilfs_bmap_dirty(&btree->bt_bmap))
		nilfs_bmap_set_dirty(&btree->bt_bmap);
}

static int nilfs_btree_delete(struct nilfs_bmap *bmap, __u64 key)
{
	struct nilfs_btree *btree;
	struct nilfs_btree_path *path;
	struct nilfs_bmap_stats stats;
	struct inode *dat;
	int level, ret;

	btree = (struct nilfs_btree *)bmap;
	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, NULL,
				    NILFS_BTREE_LEVEL_NODE_MIN);
	if (ret < 0)
		goto out;

	dat = NILFS_BMAP_USE_VBN(&btree->bt_bmap) ?
		nilfs_bmap_get_dat(&btree->bt_bmap) : NULL;

	ret = nilfs_btree_prepare_delete(btree, path, &level, &stats, dat);
	if (ret < 0)
		goto out;
	nilfs_btree_commit_delete(btree, path, level, dat);
	nilfs_bmap_sub_blocks(bmap, stats.bs_nblocks);

out:
	nilfs_btree_free_path(path);
	return ret;
}

static int nilfs_btree_last_key(const struct nilfs_bmap *bmap, __u64 *keyp)
{
	struct nilfs_btree *btree;
	struct nilfs_btree_path *path;
	int ret;

	btree = (struct nilfs_btree *)bmap;
	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup_last(btree, path, keyp, NULL);

	nilfs_btree_free_path(path);

	return ret;
}
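
/*
 * nilfs_btree_check_delete - check whether deleting "key" would leave the
 * B-tree small enough to be turned back into a direct mapping: the key must
 * be the current last key and the largest remaining key must be below
 * NILFS_BMAP_LARGE_LOW.
 */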
static int nilfs_btree_check_delete(struct nilfs_bmap *bmap, __u64 key)
{
	struct buffer_head *bh;
	struct nilfs_btree *btree;
	struct nilfs_btree_node *root, *node;
	__u64 maxkey, nextmaxkey;
	__u64 ptr;
	int nchildren, ret;

	btree = (struct nilfs_btree *)bmap;
	root = nilfs_btree_get_root(btree);
	switch (nilfs_btree_height(btree)) {
	case 2:
		bh = NULL;
		node = root;
		break;
	case 3:
		nchildren = nilfs_btree_node_get_nchildren(root);
		if (nchildren > 1)
			return 0;
		ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1);
		ret = nilfs_btree_get_block(btree, ptr, &bh);
		if (ret < 0)
			return ret;
		node = (struct nilfs_btree_node *)bh->b_data;
		break;
	default:
		return 0;
	}

	nchildren = nilfs_btree_node_get_nchildren(node);
	maxkey = nilfs_btree_node_get_key(node, nchildren - 1);
	nextmaxkey = (nchildren > 1) ?
		nilfs_btree_node_get_key(node, nchildren - 2) : 0;
	if (bh != NULL)
		brelse(bh);

	return (maxkey == key) && (nextmaxkey < NILFS_BMAP_LARGE_LOW);
}
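
/*
 * nilfs_btree_gather_data - copy up to "nitems" key/pointer pairs out of the
 * bottom-level node into the "keys" and "ptrs" arrays, typically so that the
 * caller can convert the mapping back to a direct bmap.  Returns the number
 * of items copied or a negative error code.
 */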
static int nilfs_btree_gather_data(struct nilfs_bmap *bmap,
				   __u64 *keys, __u64 *ptrs, int nitems)
{
	struct buffer_head *bh;
	struct nilfs_btree *btree;
	struct nilfs_btree_node *node, *root;
	__le64 *dkeys;
	__le64 *dptrs;
	__u64 ptr;
	int nchildren, i, ret;

	btree = (struct nilfs_btree *)bmap;
	root = nilfs_btree_get_root(btree);
	switch (nilfs_btree_height(btree)) {
	case 2:
		bh = NULL;
		node = root;
		break;
	case 3:
		nchildren = nilfs_btree_node_get_nchildren(root);
		WARN_ON(nchildren > 1);
		ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1);
		ret = nilfs_btree_get_block(btree, ptr, &bh);
		if (ret < 0)
			return ret;
		node = (struct nilfs_btree_node *)bh->b_data;
		break;
	default:
		node = NULL;
		return -EINVAL;
	}

	nchildren = nilfs_btree_node_get_nchildren(node);
	if (nchildren < nitems)
		nitems = nchildren;
	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, btree);
	for (i = 0; i < nitems; i++) {
		keys[i] = nilfs_bmap_dkey_to_key(dkeys[i]);
		ptrs[i] = nilfs_bmap_dptr_to_ptr(dptrs[i]);
	}

	if (bh != NULL)
		brelse(bh);

	return nitems;
}
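
/*
 * Conversion from a direct mapping to a B-tree.  The prepare/commit pair
 * below reserves the needed pointers (and, when the existing entries do not
 * fit into the root, one child node block), then rebuilds the existing
 * entries plus the new one as a one- or two-level B-tree.
 */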
static int
nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
                                       union nilfs_bmap_ptr_req *dreq,
                                       union nilfs_bmap_ptr_req *nreq,
                                       struct buffer_head **bhp,
                                       struct nilfs_bmap_stats *stats)
{
        struct buffer_head *bh;
        struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
        struct inode *dat = NULL;
        int ret;

        stats->bs_nblocks = 0;

        /* for data */
        /* cannot find near ptr */
        if (NILFS_BMAP_USE_VBN(bmap)) {
                dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key);
                dat = nilfs_bmap_get_dat(bmap);
        }

        ret = nilfs_bmap_prepare_alloc_ptr(bmap, dreq, dat);
        if (ret < 0)
                return ret;

        *bhp = NULL;
        stats->bs_nblocks++;
        if (nreq != NULL) {
                nreq->bpr_ptr = dreq->bpr_ptr + 1;
                ret = nilfs_bmap_prepare_alloc_ptr(bmap, nreq, dat);
                if (ret < 0)
                        goto err_out_dreq;

                ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh);
                if (ret < 0)
                        goto err_out_nreq;

                *bhp = bh;
                stats->bs_nblocks++;
        }

        /* success */
        return 0;

        /* error */
 err_out_nreq:
        nilfs_bmap_abort_alloc_ptr(bmap, nreq, dat);
 err_out_dreq:
        nilfs_bmap_abort_alloc_ptr(bmap, dreq, dat);
        stats->bs_nblocks = 0;
        return ret;
}

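/*
 * nilfs_btree_commit_convert_and_insert - finish the direct-to-B-tree switch
 *
 * Reinitializes the bmap as a B-tree and rebuilds its root from the
 * @keys/@ptrs taken over from the old mapping plus the new @key and the
 * pointer reserved in @dreq.  If a level-1 child was reserved (@nreq and
 * @bh), the entries are placed in that child and the root points to it.
 */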
static void
nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
                                      __u64 key, __u64 ptr,
                                      const __u64 *keys, const __u64 *ptrs,
                                      int n,
                                      union nilfs_bmap_ptr_req *dreq,
                                      union nilfs_bmap_ptr_req *nreq,
                                      struct buffer_head *bh)
{
        struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
        struct nilfs_btree_node *node;
        struct inode *dat;
        __u64 tmpptr;

        /* free resources */
        if (bmap->b_ops->bop_clear != NULL)
                bmap->b_ops->bop_clear(bmap);

        /* ptr must be a pointer to a buffer head. */
        set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));

        /* convert and insert */
        dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
        nilfs_btree_init(bmap);
        if (nreq != NULL) {
                nilfs_bmap_commit_alloc_ptr(bmap, dreq, dat);
                nilfs_bmap_commit_alloc_ptr(bmap, nreq, dat);

                /* create child node at level 1 */
                node = (struct nilfs_btree_node *)bh->b_data;
                nilfs_btree_node_init(btree, node, 0, 1, n, keys, ptrs);
                nilfs_btree_node_insert(btree, node,
                                        key, dreq->bpr_ptr, n);
                if (!buffer_dirty(bh))
                        nilfs_btnode_mark_dirty(bh);
                if (!nilfs_bmap_dirty(bmap))
                        nilfs_bmap_set_dirty(bmap);

                brelse(bh);

                /* create root node at level 2 */
                node = nilfs_btree_get_root(btree);
                tmpptr = nreq->bpr_ptr;
                nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT,
                                      2, 1, &keys[0], &tmpptr);
        } else {
                nilfs_bmap_commit_alloc_ptr(bmap, dreq, dat);

                /* create root node at level 1 */
                node = nilfs_btree_get_root(btree);
                nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT,
                                      1, n, keys, ptrs);
                nilfs_btree_node_insert(btree, node,
                                        key, dreq->bpr_ptr, n);
                if (!nilfs_bmap_dirty(bmap))
                        nilfs_bmap_set_dirty(bmap);
        }

        if (NILFS_BMAP_USE_VBN(bmap))
                nilfs_btree_set_target_v(btree, key, dreq->bpr_ptr);
}

/**
 * nilfs_btree_convert_and_insert - convert a direct bmap to a B-tree and insert a new entry
 * @bmap: bmap structure to be converted
 * @key: key of the new entry to be inserted
 * @ptr: pointer (block address) of the new entry to be inserted
 * @keys: array of keys taken over from the previous mapping
 * @ptrs: array of pointers associated with @keys
 * @n: number of entries in @keys and @ptrs
 */
int nilfs_btree_convert_and_insert(struct nilfs_bmap *bmap,
                                   __u64 key, __u64 ptr,
                                   const __u64 *keys, const __u64 *ptrs, int n)
{
        struct buffer_head *bh;
        union nilfs_bmap_ptr_req dreq, nreq, *di, *ni;
        struct nilfs_bmap_stats stats;
        int ret;

        if (n + 1 <= NILFS_BTREE_ROOT_NCHILDREN_MAX) {
                di = &dreq;
                ni = NULL;
        } else if ((n + 1) <= NILFS_BTREE_NODE_NCHILDREN_MAX(
                           1 << bmap->b_inode->i_blkbits)) {
                di = &dreq;
                ni = &nreq;
        } else {
                di = NULL;
                ni = NULL;
                BUG();
        }

        ret = nilfs_btree_prepare_convert_and_insert(bmap, key, di, ni, &bh,
                                                     &stats);
        if (ret < 0)
                return ret;
        nilfs_btree_commit_convert_and_insert(bmap, key, ptr, keys, ptrs, n,
                                              di, ni, bh);
        nilfs_bmap_add_blocks(bmap, stats.bs_nblocks);
        return 0;
}

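/*
 * nilfs_btree_propagate_p - propagate dirtiness for a bmap using physical
 * block addresses: every clean intermediate node above @level on the
 * lookup path is simply marked dirty; no DAT update is needed.
 */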
static int nilfs_btree_propagate_p(struct nilfs_btree *btree,
                                   struct nilfs_btree_path *path,
                                   int level,
                                   struct buffer_head *bh)
{
        while ((++level < nilfs_btree_height(btree) - 1) &&
               !buffer_dirty(path[level].bp_bh))
                nilfs_btnode_mark_dirty(path[level].bp_bh);

        return 0;
}

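/*
 * nilfs_btree_prepare_update_v and its commit/abort counterparts move a
 * block to a new virtual block address: a DAT update is prepared for the
 * old/new address pair, and for B-tree node buffers the corresponding
 * key change in the btnode cache is prepared as well.
 */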
static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
                                        struct nilfs_btree_path *path,
                                        int level, struct inode *dat)
{
        struct nilfs_btree_node *parent;
        int ret;

        parent = nilfs_btree_get_node(btree, path, level + 1);
        path[level].bp_oldreq.bpr_ptr =
                nilfs_btree_node_get_ptr(btree, parent,
                                         path[level + 1].bp_index);
        path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1;
        ret = nilfs_dat_prepare_update(dat, &path[level].bp_oldreq.bpr_req,
                                       &path[level].bp_newreq.bpr_req);
        if (ret < 0)
                return ret;

        if (buffer_nilfs_node(path[level].bp_bh)) {
                path[level].bp_ctxt.oldkey = path[level].bp_oldreq.bpr_ptr;
                path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
                path[level].bp_ctxt.bh = path[level].bp_bh;
                ret = nilfs_btnode_prepare_change_key(
                        &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
                        &path[level].bp_ctxt);
                if (ret < 0) {
                        nilfs_dat_abort_update(dat,
                                               &path[level].bp_oldreq.bpr_req,
                                               &path[level].bp_newreq.bpr_req);
                        return ret;
                }
        }

        return 0;
}

static void nilfs_btree_commit_update_v(struct nilfs_btree *btree,
                                        struct nilfs_btree_path *path,
                                        int level, struct inode *dat)
{
        struct nilfs_btree_node *parent;

        nilfs_dat_commit_update(dat, &path[level].bp_oldreq.bpr_req,
                                &path[level].bp_newreq.bpr_req,
                                btree->bt_bmap.b_ptr_type == NILFS_BMAP_PTR_VS);

        if (buffer_nilfs_node(path[level].bp_bh)) {
                nilfs_btnode_commit_change_key(
                        &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
                        &path[level].bp_ctxt);
                path[level].bp_bh = path[level].bp_ctxt.bh;
        }
        set_buffer_nilfs_volatile(path[level].bp_bh);

        parent = nilfs_btree_get_node(btree, path, level + 1);
        nilfs_btree_node_set_ptr(btree, parent, path[level + 1].bp_index,
                                 path[level].bp_newreq.bpr_ptr);
}

static void nilfs_btree_abort_update_v(struct nilfs_btree *btree,
                                       struct nilfs_btree_path *path,
                                       int level, struct inode *dat)
{
        nilfs_dat_abort_update(dat, &path[level].bp_oldreq.bpr_req,
                               &path[level].bp_newreq.bpr_req);
        if (buffer_nilfs_node(path[level].bp_bh))
                nilfs_btnode_abort_change_key(
                        &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
                        &path[level].bp_ctxt);
}

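/*
 * nilfs_btree_prepare_propagate_v - prepare v-address updates along a path
 *
 * Starting at @minlevel, prepares a new virtual block address for each
 * node on the path until a dirty ancestor is reached; the highest level
 * actually prepared is returned through @maxlevelp.  On error, every
 * level prepared so far is aborted.
 */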
static int nilfs_btree_prepare_propagate_v(struct nilfs_btree *btree,
                                           struct nilfs_btree_path *path,
                                           int minlevel, int *maxlevelp,
                                           struct inode *dat)
{
        int level, ret;

        level = minlevel;
        if (!buffer_nilfs_volatile(path[level].bp_bh)) {
                ret = nilfs_btree_prepare_update_v(btree, path, level, dat);
                if (ret < 0)
                        return ret;
        }
        while ((++level < nilfs_btree_height(btree) - 1) &&
               !buffer_dirty(path[level].bp_bh)) {
                WARN_ON(buffer_nilfs_volatile(path[level].bp_bh));
                ret = nilfs_btree_prepare_update_v(btree, path, level, dat);
                if (ret < 0)
                        goto out;
        }

        /* success */
        *maxlevelp = level - 1;
        return 0;

        /* error */
 out:
        while (--level > minlevel)
                nilfs_btree_abort_update_v(btree, path, level, dat);
        if (!buffer_nilfs_volatile(path[level].bp_bh))
                nilfs_btree_abort_update_v(btree, path, level, dat);
        return ret;
}

static void nilfs_btree_commit_propagate_v(struct nilfs_btree *btree,
                                           struct nilfs_btree_path *path,
                                           int minlevel, int maxlevel,
                                           struct buffer_head *bh,
                                           struct inode *dat)
{
        int level;

        if (!buffer_nilfs_volatile(path[minlevel].bp_bh))
                nilfs_btree_commit_update_v(btree, path, minlevel, dat);

        for (level = minlevel + 1; level <= maxlevel; level++)
                nilfs_btree_commit_update_v(btree, path, level, dat);
}

static int nilfs_btree_propagate_v(struct nilfs_btree *btree,
                                   struct nilfs_btree_path *path,
                                   int level, struct buffer_head *bh)
{
        int maxlevel = 0, ret;
        struct nilfs_btree_node *parent;
        struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap);
        __u64 ptr;

        get_bh(bh);
        path[level].bp_bh = bh;
        ret = nilfs_btree_prepare_propagate_v(btree, path, level, &maxlevel,
                                              dat);
        if (ret < 0)
                goto out;

        if (buffer_nilfs_volatile(path[level].bp_bh)) {
                parent = nilfs_btree_get_node(btree, path, level + 1);
                ptr = nilfs_btree_node_get_ptr(btree, parent,
                                               path[level + 1].bp_index);
                ret = nilfs_dat_mark_dirty(dat, ptr);
                if (ret < 0)
                        goto out;
        }

        nilfs_btree_commit_propagate_v(btree, path, level, maxlevel, bh, dat);

 out:
        brelse(path[level].bp_bh);
        path[level].bp_bh = NULL;
        return ret;
}

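/*
 * nilfs_btree_propagate - bop_propagate operation of the B-tree bmap
 *
 * Looks up the path to the node or data block behind the dirty buffer
 * @bh and propagates its dirty state toward the root, using the virtual
 * (V) or physical (P) variant depending on the bmap's pointer type.
 */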
static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
                                 struct buffer_head *bh)
{
        struct nilfs_btree *btree;
        struct nilfs_btree_path *path;
        struct nilfs_btree_node *node;
        __u64 key;
        int level, ret;

        WARN_ON(!buffer_dirty(bh));

        btree = (struct nilfs_btree *)bmap;
        path = nilfs_btree_alloc_path();
        if (path == NULL)
                return -ENOMEM;

        if (buffer_nilfs_node(bh)) {
                node = (struct nilfs_btree_node *)bh->b_data;
                key = nilfs_btree_node_get_key(node, 0);
                level = nilfs_btree_node_get_level(node);
        } else {
                key = nilfs_bmap_data_get_key(bmap, bh);
                level = NILFS_BTREE_LEVEL_DATA;
        }

        ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1);
        if (ret < 0) {
                if (unlikely(ret == -ENOENT))
                        printk(KERN_CRIT "%s: key = %llu, level == %d\n",
                               __func__, (unsigned long long)key, level);
                goto out;
        }

        ret = NILFS_BMAP_USE_VBN(bmap) ?
                nilfs_btree_propagate_v(btree, path, level, bh) :
                nilfs_btree_propagate_p(btree, path, level, bh);

 out:
        nilfs_btree_free_path(path);

        return ret;
}

static int nilfs_btree_propagate_gc(const struct nilfs_bmap *bmap,
                                    struct buffer_head *bh)
{
        return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(bmap), bh->b_blocknr);
}

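/*
 * nilfs_btree_add_dirty_buffer - queue a dirty btnode buffer for writeout
 *
 * Inserts @bh into the list for its level, keeping each per-level list
 * sorted by the first key of the node.
 */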
static void nilfs_btree_add_dirty_buffer(struct nilfs_btree *btree,
                                         struct list_head *lists,
                                         struct buffer_head *bh)
{
        struct list_head *head;
        struct buffer_head *cbh;
        struct nilfs_btree_node *node, *cnode;
        __u64 key, ckey;
        int level;

        get_bh(bh);
        node = (struct nilfs_btree_node *)bh->b_data;
        key = nilfs_btree_node_get_key(node, 0);
        level = nilfs_btree_node_get_level(node);
        list_for_each(head, &lists[level]) {
                cbh = list_entry(head, struct buffer_head, b_assoc_buffers);
                cnode = (struct nilfs_btree_node *)cbh->b_data;
                ckey = nilfs_btree_node_get_key(cnode, 0);
                if (key < ckey)
                        break;
        }
        list_add_tail(&bh->b_assoc_buffers, head);
}

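/*
 * nilfs_btree_lookup_dirty_buffers - collect dirty btnode buffers
 *
 * Scans the btnode cache for dirty pages, sorts their buffers into
 * per-level lists, and splices the lists onto @listp with the lowest
 * levels first.
 */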
static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *bmap,
                                             struct list_head *listp)
{
        struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
        struct address_space *btcache = &NILFS_BMAP_I(bmap)->i_btnode_cache;
        struct list_head lists[NILFS_BTREE_LEVEL_MAX];
        struct pagevec pvec;
        struct buffer_head *bh, *head;
        pgoff_t index = 0;
        int level, i;

        for (level = NILFS_BTREE_LEVEL_NODE_MIN;
             level < NILFS_BTREE_LEVEL_MAX;
             level++)
                INIT_LIST_HEAD(&lists[level]);

        pagevec_init(&pvec, 0);

        while (pagevec_lookup_tag(&pvec, btcache, &index, PAGECACHE_TAG_DIRTY,
                                  PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        bh = head = page_buffers(pvec.pages[i]);
                        do {
                                if (buffer_dirty(bh))
                                        nilfs_btree_add_dirty_buffer(btree,
                                                                     lists, bh);
                        } while ((bh = bh->b_this_page) != head);
                }
                pagevec_release(&pvec);
                cond_resched();
        }

        for (level = NILFS_BTREE_LEVEL_NODE_MIN;
             level < NILFS_BTREE_LEVEL_MAX;
             level++)
                list_splice_tail(&lists[level], listp);
}

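/*
 * nilfs_btree_assign_p - assign a real block number (physical bmap)
 *
 * Rewrites the parent pointer (and, for node buffers, the btnode cache
 * key) to the newly assigned @blocknr and records the block offset and
 * level in the on-disk binfo.
 */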
static int nilfs_btree_assign_p(struct nilfs_btree *btree,
                                struct nilfs_btree_path *path,
                                int level,
                                struct buffer_head **bh,
                                sector_t blocknr,
                                union nilfs_binfo *binfo)
{
        struct nilfs_btree_node *parent;
        __u64 key;
        __u64 ptr;
        int ret;

        parent = nilfs_btree_get_node(btree, path, level + 1);
        ptr = nilfs_btree_node_get_ptr(btree, parent,
                                       path[level + 1].bp_index);
        if (buffer_nilfs_node(*bh)) {
                path[level].bp_ctxt.oldkey = ptr;
                path[level].bp_ctxt.newkey = blocknr;
                path[level].bp_ctxt.bh = *bh;
                ret = nilfs_btnode_prepare_change_key(
                        &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
                        &path[level].bp_ctxt);
                if (ret < 0)
                        return ret;
                nilfs_btnode_commit_change_key(
                        &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
                        &path[level].bp_ctxt);
                *bh = path[level].bp_ctxt.bh;
        }

        nilfs_btree_node_set_ptr(btree, parent,
                                 path[level + 1].bp_index, blocknr);

        key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index);
        /* on-disk format */
        binfo->bi_dat.bi_blkoff = nilfs_bmap_key_to_dkey(key);
        binfo->bi_dat.bi_level = level;

        return 0;
}

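/*
 * nilfs_btree_assign_v - assign a real block number (virtual bmap)
 *
 * Binds the virtual block address taken from the parent node to the
 * real @blocknr in the DAT and records the virtual address and block
 * offset in the on-disk binfo.
 */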
static int nilfs_btree_assign_v(struct nilfs_btree *btree,
                                struct nilfs_btree_path *path,
                                int level,
                                struct buffer_head **bh,
                                sector_t blocknr,
                                union nilfs_binfo *binfo)
{
        struct nilfs_btree_node *parent;
        struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap);
        __u64 key;
        __u64 ptr;
        union nilfs_bmap_ptr_req req;
        int ret;

        parent = nilfs_btree_get_node(btree, path, level + 1);
        ptr = nilfs_btree_node_get_ptr(btree, parent,
                                       path[level + 1].bp_index);
        req.bpr_ptr = ptr;
        ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
        if (ret < 0)
                return ret;
        nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);

        key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index);
        /* on-disk format */
        binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr);
        binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key);

        return 0;
}

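/*
 * nilfs_btree_assign - bop_assign operation of the B-tree bmap
 *
 * Invoked at segment construction time when @bh is about to be written
 * to @blocknr; looks up the path to the block and delegates to the V or
 * P variant to update the pointers and fill in @binfo.
 */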
static int nilfs_btree_assign(struct nilfs_bmap *bmap,
                              struct buffer_head **bh,
                              sector_t blocknr,
                              union nilfs_binfo *binfo)
{
        struct nilfs_btree *btree;
        struct nilfs_btree_path *path;
        struct nilfs_btree_node *node;
        __u64 key;
        int level, ret;

        btree = (struct nilfs_btree *)bmap;
        path = nilfs_btree_alloc_path();
        if (path == NULL)
                return -ENOMEM;

        if (buffer_nilfs_node(*bh)) {
                node = (struct nilfs_btree_node *)(*bh)->b_data;
                key = nilfs_btree_node_get_key(node, 0);
                level = nilfs_btree_node_get_level(node);
        } else {
                key = nilfs_bmap_data_get_key(bmap, *bh);
                level = NILFS_BTREE_LEVEL_DATA;
        }

        ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1);
        if (ret < 0) {
                WARN_ON(ret == -ENOENT);
                goto out;
        }

        ret = NILFS_BMAP_USE_VBN(bmap) ?
                nilfs_btree_assign_v(btree, path, level, bh, blocknr, binfo) :
                nilfs_btree_assign_p(btree, path, level, bh, blocknr, binfo);

 out:
        nilfs_btree_free_path(path);

        return ret;
}

static int nilfs_btree_assign_gc(struct nilfs_bmap *bmap,
                                 struct buffer_head **bh,
                                 sector_t blocknr,
                                 union nilfs_binfo *binfo)
{
        struct nilfs_btree_node *node;
        __u64 key;
        int ret;

        ret = nilfs_dat_move(nilfs_bmap_get_dat(bmap), (*bh)->b_blocknr,
                             blocknr);
        if (ret < 0)
                return ret;

        if (buffer_nilfs_node(*bh)) {
                node = (struct nilfs_btree_node *)(*bh)->b_data;
                key = nilfs_btree_node_get_key(node, 0);
        } else
                key = nilfs_bmap_data_get_key(bmap, *bh);

        /* on-disk format */
        binfo->bi_v.bi_vblocknr = cpu_to_le64((*bh)->b_blocknr);
        binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key);

        return 0;
}

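/*
 * nilfs_btree_mark - mark the node block containing @key at @level dirty
 * so that it is written out by the next segment construction.
 */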
static int nilfs_btree_mark(struct nilfs_bmap *bmap, __u64 key, int level)
{
        struct buffer_head *bh;
        struct nilfs_btree *btree;
        struct nilfs_btree_path *path;
        __u64 ptr;
        int ret;

        btree = (struct nilfs_btree *)bmap;
        path = nilfs_btree_alloc_path();
        if (path == NULL)
                return -ENOMEM;

        ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level + 1);
        if (ret < 0) {
                WARN_ON(ret == -ENOENT);
                goto out;
        }
        ret = nilfs_btree_get_block(btree, ptr, &bh);
        if (ret < 0) {
                WARN_ON(ret == -ENOENT);
                goto out;
        }

        if (!buffer_dirty(bh))
                nilfs_btnode_mark_dirty(bh);
        brelse(bh);
        if (!nilfs_bmap_dirty(&btree->bt_bmap))
                nilfs_bmap_set_dirty(&btree->bt_bmap);

 out:
        nilfs_btree_free_path(path);
        return ret;
}

static const struct nilfs_bmap_operations nilfs_btree_ops = {
        .bop_lookup               = nilfs_btree_lookup,
        .bop_lookup_contig        = nilfs_btree_lookup_contig,
        .bop_insert               = nilfs_btree_insert,
        .bop_delete               = nilfs_btree_delete,
        .bop_clear                = NULL,

        .bop_propagate            = nilfs_btree_propagate,

        .bop_lookup_dirty_buffers = nilfs_btree_lookup_dirty_buffers,

        .bop_assign               = nilfs_btree_assign,
        .bop_mark                 = nilfs_btree_mark,

        .bop_last_key             = nilfs_btree_last_key,
        .bop_check_insert         = NULL,
        .bop_check_delete         = nilfs_btree_check_delete,
        .bop_gather_data          = nilfs_btree_gather_data,
};

static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
        .bop_lookup               = NULL,
        .bop_lookup_contig        = NULL,
        .bop_insert               = NULL,
        .bop_delete               = NULL,
        .bop_clear                = NULL,

        .bop_propagate            = nilfs_btree_propagate_gc,

        .bop_lookup_dirty_buffers = nilfs_btree_lookup_dirty_buffers,

        .bop_assign               = nilfs_btree_assign_gc,
        .bop_mark                 = NULL,

        .bop_last_key             = NULL,
        .bop_check_insert         = NULL,
        .bop_check_delete         = NULL,
        .bop_gather_data          = NULL,
};

int nilfs_btree_init(struct nilfs_bmap *bmap)
{
        bmap->b_ops = &nilfs_btree_ops;
        return 0;
}

void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
{
        bmap->b_ops = &nilfs_btree_ops_gc;
}