/* fs/nilfs2/btree.c - NILFS B-tree implementation */
  1. /*
  2. * btree.c - NILFS B-tree.
  3. *
  4. * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  19. *
  20. * Written by Koji Sato <koji@osrg.net>.
  21. */
  22. #include <linux/slab.h>
  23. #include <linux/string.h>
  24. #include <linux/errno.h>
  25. #include <linux/pagevec.h>
  26. #include "nilfs.h"
  27. #include "page.h"
  28. #include "btnode.h"
  29. #include "btree.h"
  30. #include "alloc.h"
  31. #include "dat.h"
/**
 * struct nilfs_btree_path - A path on which B-tree operations are executed
 * @bp_bh: buffer head of node block
 * @bp_sib_bh: buffer head of sibling node block
 * @bp_index: index of child node
 * @bp_oldreq: ptr end request for old ptr
 * @bp_newreq: ptr alloc request for new ptr
 * @bp_ctxt: btnode change-key context (type suggests it is used when a
 *           node block's key is relocated -- confirm against btnode.c)
 * @bp_op: rebalance operation
 */
struct nilfs_btree_path {
	struct buffer_head *bp_bh;
	struct buffer_head *bp_sib_bh;
	int bp_index;
	union nilfs_bmap_ptr_req bp_oldreq;
	union nilfs_bmap_ptr_req bp_newreq;
	struct nilfs_btnode_chkey_ctxt bp_ctxt;
	void (*bp_op)(struct nilfs_btree *, struct nilfs_btree_path *,
		      int, __u64 *, __u64 *);
};
/*
 * B-tree path operations
 */

/* Slab cache holding whole path arrays (NILFS_BTREE_LEVEL_MAX entries
 * per object), so one allocation covers every level of a traversal. */
static struct kmem_cache *nilfs_btree_path_cache;

/*
 * nilfs_btree_path_cache_init - create the btree path slab cache
 *
 * Returns 0 on success, or -ENOMEM if the cache could not be created.
 */
int __init nilfs_btree_path_cache_init(void)
{
	nilfs_btree_path_cache =
		kmem_cache_create("nilfs2_btree_path_cache",
				  sizeof(struct nilfs_btree_path) *
				  NILFS_BTREE_LEVEL_MAX, 0, 0, NULL);
	return (nilfs_btree_path_cache != NULL) ? 0 : -ENOMEM;
}
/*
 * nilfs_btree_path_cache_destroy - destroy the btree path slab cache
 *
 * All path objects must have been freed before this is called.
 */
void nilfs_btree_path_cache_destroy(void)
{
	kmem_cache_destroy(nilfs_btree_path_cache);
}
  67. static inline struct nilfs_btree_path *
  68. nilfs_btree_alloc_path(const struct nilfs_btree *btree)
  69. {
  70. return (struct nilfs_btree_path *)
  71. kmem_cache_alloc(nilfs_btree_path_cache, GFP_NOFS);
  72. }
/*
 * nilfs_btree_free_path - return a path array to the slab cache
 * @btree: btree (unused; kept for interface symmetry with alloc_path)
 * @path: path array obtained from nilfs_btree_alloc_path()
 *
 * Buffer heads referenced by @path must already have been released
 * (see nilfs_btree_clear_path()).
 */
static inline void nilfs_btree_free_path(const struct nilfs_btree *btree,
					 struct nilfs_btree_path *path)
{
	kmem_cache_free(nilfs_btree_path_cache, path);
}
  78. static void nilfs_btree_init_path(const struct nilfs_btree *btree,
  79. struct nilfs_btree_path *path)
  80. {
  81. int level;
  82. for (level = NILFS_BTREE_LEVEL_DATA;
  83. level < NILFS_BTREE_LEVEL_MAX;
  84. level++) {
  85. path[level].bp_bh = NULL;
  86. path[level].bp_sib_bh = NULL;
  87. path[level].bp_index = 0;
  88. path[level].bp_oldreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
  89. path[level].bp_newreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
  90. path[level].bp_op = NULL;
  91. }
  92. }
/*
 * nilfs_btree_clear_path - release buffers and reset a used path
 * @btree: btree (unused)
 * @path: path array to clear
 *
 * Drops the reference on each level's node buffer and restores the
 * entry to its post-init state so the path can be reused or freed.
 */
static void nilfs_btree_clear_path(const struct nilfs_btree *btree,
				   struct nilfs_btree_path *path)
{
	int level;

	for (level = NILFS_BTREE_LEVEL_DATA;
	     level < NILFS_BTREE_LEVEL_MAX;
	     level++) {
		if (path[level].bp_bh != NULL) {
			brelse(path[level].bp_bh);
			path[level].bp_bh = NULL;
		}
		/* sib_bh is released or deleted by prepare or commit
		 * operations. */
		path[level].bp_sib_bh = NULL;
		path[level].bp_index = 0;
		path[level].bp_oldreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
		path[level].bp_newreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
		path[level].bp_op = NULL;
	}
}
  113. /*
  114. * B-tree node operations
  115. */
/*
 * B-tree node operations
 */

/*
 * nilfs_btree_get_block - read an existing btree node block
 * @btree: btree
 * @ptr: node pointer (block address within the btnode cache)
 * @bhp: place to store the buffer head on success
 *
 * Fetches the node through the inode's btnode cache; the final 0
 * argument to nilfs_btnode_get() presumably means "existing block,
 * do not create" -- confirm against btnode.c.  Returns 0 or a
 * negative error code.
 */
static int nilfs_btree_get_block(const struct nilfs_btree *btree, __u64 ptr,
				 struct buffer_head **bhp)
{
	struct address_space *btnc =
		&NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
	return nilfs_btnode_get(btnc, ptr, 0, bhp, 0);
}
/*
 * nilfs_btree_get_new_block - create a buffer for a new btree node
 * @btree: btree
 * @ptr: node pointer assigned to the new block
 * @bhp: place to store the new buffer head on success
 *
 * Same as nilfs_btree_get_block() but passes 1 as the last argument
 * (create) and marks the resulting buffer "nilfs volatile", i.e. not
 * yet assigned a fixed disk location.  Returns 0 or a negative error.
 */
static int nilfs_btree_get_new_block(const struct nilfs_btree *btree,
				     __u64 ptr, struct buffer_head **bhp)
{
	struct address_space *btnc =
		&NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
	int ret;

	ret = nilfs_btnode_get(btnc, ptr, 0, bhp, 1);
	if (!ret)
		set_buffer_nilfs_volatile(*bhp);
	return ret;
}
/* Return the node's flag byte (e.g. NILFS_BTREE_NODE_ROOT). */
static inline int
nilfs_btree_node_get_flags(const struct nilfs_btree *btree,
			   const struct nilfs_btree_node *node)
{
	return node->bn_flags;
}
/* Overwrite the node's flag byte. */
static inline void
nilfs_btree_node_set_flags(struct nilfs_btree *btree,
			   struct nilfs_btree_node *node,
			   int flags)
{
	node->bn_flags = flags;
}
/* Nonzero (the masked flag bit, not necessarily 1) iff @node is the
 * root node embedded in the bmap. */
static inline int nilfs_btree_node_root(const struct nilfs_btree *btree,
					const struct nilfs_btree_node *node)
{
	return nilfs_btree_node_get_flags(btree, node) & NILFS_BTREE_NODE_ROOT;
}
/* Return the node's level in the tree (leaves at the minimum level). */
static inline int
nilfs_btree_node_get_level(const struct nilfs_btree *btree,
			   const struct nilfs_btree_node *node)
{
	return node->bn_level;
}
/* Store the node's level field. */
static inline void
nilfs_btree_node_set_level(struct nilfs_btree *btree,
			   struct nilfs_btree_node *node,
			   int level)
{
	node->bn_level = level;
}
/* Number of occupied (key, ptr) slots; stored little-endian on disk. */
static inline int
nilfs_btree_node_get_nchildren(const struct nilfs_btree *btree,
			       const struct nilfs_btree_node *node)
{
	return le16_to_cpu(node->bn_nchildren);
}
/* Store the child count, converting to on-disk little-endian. */
static inline void
nilfs_btree_node_set_nchildren(struct nilfs_btree *btree,
			       struct nilfs_btree_node *node,
			       int nchildren)
{
	node->bn_nchildren = cpu_to_le16(nchildren);
}
/* Size in bytes of a non-root node block = filesystem block size. */
static inline int
nilfs_btree_node_size(const struct nilfs_btree *btree)
{
	return 1 << btree->bt_bmap.b_inode->i_blkbits;
}
  183. static inline int
  184. nilfs_btree_node_nchildren_min(const struct nilfs_btree *btree,
  185. const struct nilfs_btree_node *node)
  186. {
  187. return nilfs_btree_node_root(btree, node) ?
  188. NILFS_BTREE_ROOT_NCHILDREN_MIN :
  189. NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree));
  190. }
  191. static inline int
  192. nilfs_btree_node_nchildren_max(const struct nilfs_btree *btree,
  193. const struct nilfs_btree_node *node)
  194. {
  195. return nilfs_btree_node_root(btree, node) ?
  196. NILFS_BTREE_ROOT_NCHILDREN_MAX :
  197. NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree));
  198. }
/*
 * nilfs_btree_node_dkeys - address of the on-disk key array
 *
 * Keys start immediately after the node header; non-root nodes carry
 * NILFS_BTREE_NODE_EXTRA_PAD_SIZE bytes of padding before the keys,
 * the root does not.
 */
static inline __le64 *
nilfs_btree_node_dkeys(const struct nilfs_btree *btree,
		       const struct nilfs_btree_node *node)
{
	return (__le64 *)((char *)(node + 1) +
			  (nilfs_btree_node_root(btree, node) ?
			   0 : NILFS_BTREE_NODE_EXTRA_PAD_SIZE));
}
/*
 * nilfs_btree_node_dptrs - address of the on-disk pointer array
 *
 * The pointer array follows the key array, which is always sized for
 * the node's maximum capacity (not its current child count).
 */
static inline __le64 *
nilfs_btree_node_dptrs(const struct nilfs_btree *btree,
		       const struct nilfs_btree_node *node)
{
	return (__le64 *)(nilfs_btree_node_dkeys(btree, node) +
			  nilfs_btree_node_nchildren_max(btree, node));
}
  214. static inline __u64
  215. nilfs_btree_node_get_key(const struct nilfs_btree *btree,
  216. const struct nilfs_btree_node *node, int index)
  217. {
  218. return nilfs_bmap_dkey_to_key(*(nilfs_btree_node_dkeys(btree, node) +
  219. index));
  220. }
  221. static inline void
  222. nilfs_btree_node_set_key(struct nilfs_btree *btree,
  223. struct nilfs_btree_node *node, int index, __u64 key)
  224. {
  225. *(nilfs_btree_node_dkeys(btree, node) + index) =
  226. nilfs_bmap_key_to_dkey(key);
  227. }
  228. static inline __u64
  229. nilfs_btree_node_get_ptr(const struct nilfs_btree *btree,
  230. const struct nilfs_btree_node *node,
  231. int index)
  232. {
  233. return nilfs_bmap_dptr_to_ptr(*(nilfs_btree_node_dptrs(btree, node) +
  234. index));
  235. }
  236. static inline void
  237. nilfs_btree_node_set_ptr(struct nilfs_btree *btree,
  238. struct nilfs_btree_node *node,
  239. int index,
  240. __u64 ptr)
  241. {
  242. *(nilfs_btree_node_dptrs(btree, node) + index) =
  243. nilfs_bmap_ptr_to_dptr(ptr);
  244. }
  245. static void nilfs_btree_node_init(struct nilfs_btree *btree,
  246. struct nilfs_btree_node *node,
  247. int flags, int level, int nchildren,
  248. const __u64 *keys, const __u64 *ptrs)
  249. {
  250. __le64 *dkeys;
  251. __le64 *dptrs;
  252. int i;
  253. nilfs_btree_node_set_flags(btree, node, flags);
  254. nilfs_btree_node_set_level(btree, node, level);
  255. nilfs_btree_node_set_nchildren(btree, node, nchildren);
  256. dkeys = nilfs_btree_node_dkeys(btree, node);
  257. dptrs = nilfs_btree_node_dptrs(btree, node);
  258. for (i = 0; i < nchildren; i++) {
  259. dkeys[i] = nilfs_bmap_key_to_dkey(keys[i]);
  260. dptrs[i] = nilfs_bmap_ptr_to_dptr(ptrs[i]);
  261. }
  262. }
/* Assume the buffer heads corresponding to left and right are locked. */
/*
 * nilfs_btree_node_move_left - shift the first @n entries of @right
 * onto the tail of @left, updating both child counts.
 *
 * Copy order matters: first append right's head to left (disjoint
 * buffers, memcpy is fine), then close the gap inside right with
 * memmove because source and destination overlap.
 */
static void nilfs_btree_node_move_left(struct nilfs_btree *btree,
				       struct nilfs_btree_node *left,
				       struct nilfs_btree_node *right,
				       int n)
{
	__le64 *ldkeys, *rdkeys;
	__le64 *ldptrs, *rdptrs;
	int lnchildren, rnchildren;

	ldkeys = nilfs_btree_node_dkeys(btree, left);
	ldptrs = nilfs_btree_node_dptrs(btree, left);
	lnchildren = nilfs_btree_node_get_nchildren(btree, left);

	rdkeys = nilfs_btree_node_dkeys(btree, right);
	rdptrs = nilfs_btree_node_dptrs(btree, right);
	rnchildren = nilfs_btree_node_get_nchildren(btree, right);

	/* append right[0..n) after left's last entry */
	memcpy(ldkeys + lnchildren, rdkeys, n * sizeof(*rdkeys));
	memcpy(ldptrs + lnchildren, rdptrs, n * sizeof(*rdptrs));
	/* slide right's remaining entries down to slot 0 (overlapping) */
	memmove(rdkeys, rdkeys + n, (rnchildren - n) * sizeof(*rdkeys));
	memmove(rdptrs, rdptrs + n, (rnchildren - n) * sizeof(*rdptrs));

	lnchildren += n;
	rnchildren -= n;
	nilfs_btree_node_set_nchildren(btree, left, lnchildren);
	nilfs_btree_node_set_nchildren(btree, right, rnchildren);
}
/* Assume that the buffer heads corresponding to left and right are locked. */
/*
 * nilfs_btree_node_move_right - shift the last @n entries of @left
 * onto the head of @right, updating both child counts.
 *
 * Copy order matters: first open an n-slot gap at the head of right
 * (memmove, overlapping), then fill the gap from left's tail (disjoint
 * buffers, memcpy is fine).
 */
static void nilfs_btree_node_move_right(struct nilfs_btree *btree,
					struct nilfs_btree_node *left,
					struct nilfs_btree_node *right,
					int n)
{
	__le64 *ldkeys, *rdkeys;
	__le64 *ldptrs, *rdptrs;
	int lnchildren, rnchildren;

	ldkeys = nilfs_btree_node_dkeys(btree, left);
	ldptrs = nilfs_btree_node_dptrs(btree, left);
	lnchildren = nilfs_btree_node_get_nchildren(btree, left);

	rdkeys = nilfs_btree_node_dkeys(btree, right);
	rdptrs = nilfs_btree_node_dptrs(btree, right);
	rnchildren = nilfs_btree_node_get_nchildren(btree, right);

	/* open a gap of n entries at right[0] (overlapping move) */
	memmove(rdkeys + n, rdkeys, rnchildren * sizeof(*rdkeys));
	memmove(rdptrs + n, rdptrs, rnchildren * sizeof(*rdptrs));
	/* copy left's last n entries into the gap */
	memcpy(rdkeys, ldkeys + lnchildren - n, n * sizeof(*rdkeys));
	memcpy(rdptrs, ldptrs + lnchildren - n, n * sizeof(*rdptrs));

	lnchildren -= n;
	rnchildren += n;
	nilfs_btree_node_set_nchildren(btree, left, lnchildren);
	nilfs_btree_node_set_nchildren(btree, right, rnchildren);
}
/* Assume that the buffer head corresponding to node is locked. */
/*
 * nilfs_btree_node_insert - insert (@key, @ptr) at slot @index
 *
 * Entries at and after @index are shifted one slot right (memmove,
 * overlapping regions) before the new pair is stored; the caller must
 * have verified the node has room.
 */
static void nilfs_btree_node_insert(struct nilfs_btree *btree,
				    struct nilfs_btree_node *node,
				    __u64 key, __u64 ptr, int index)
{
	__le64 *dkeys;
	__le64 *dptrs;
	int nchildren;

	dkeys = nilfs_btree_node_dkeys(btree, node);
	dptrs = nilfs_btree_node_dptrs(btree, node);
	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	if (index < nchildren) {
		/* make room for the new slot */
		memmove(dkeys + index + 1, dkeys + index,
			(nchildren - index) * sizeof(*dkeys));
		memmove(dptrs + index + 1, dptrs + index,
			(nchildren - index) * sizeof(*dptrs));
	}
	dkeys[index] = nilfs_bmap_key_to_dkey(key);
	dptrs[index] = nilfs_bmap_ptr_to_dptr(ptr);
	nchildren++;
	nilfs_btree_node_set_nchildren(btree, node, nchildren);
}
/* Assume that the buffer head corresponding to node is locked. */
/*
 * nilfs_btree_node_delete - remove the entry at slot @index
 * @keyp: if non-NULL, receives the removed key
 * @ptrp: if non-NULL, receives the removed pointer
 *
 * The removed values are read out before later entries are shifted
 * left to fill the hole (memmove, overlapping regions).
 */
static void nilfs_btree_node_delete(struct nilfs_btree *btree,
				    struct nilfs_btree_node *node,
				    __u64 *keyp, __u64 *ptrp, int index)
{
	__u64 key;
	__u64 ptr;
	__le64 *dkeys;
	__le64 *dptrs;
	int nchildren;

	dkeys = nilfs_btree_node_dkeys(btree, node);
	dptrs = nilfs_btree_node_dptrs(btree, node);
	key = nilfs_bmap_dkey_to_key(dkeys[index]);
	ptr = nilfs_bmap_dptr_to_ptr(dptrs[index]);
	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	if (keyp != NULL)
		*keyp = key;
	if (ptrp != NULL)
		*ptrp = ptr;
	if (index < nchildren - 1) {
		/* close the hole left by the removed slot */
		memmove(dkeys + index, dkeys + index + 1,
			(nchildren - index - 1) * sizeof(*dkeys));
		memmove(dptrs + index, dptrs + index + 1,
			(nchildren - index - 1) * sizeof(*dptrs));
	}
	nchildren--;
	nilfs_btree_node_set_nchildren(btree, node, nchildren);
}
/*
 * nilfs_btree_node_lookup - binary-search @node for @key
 * @indexp: receives the resulting slot index
 *
 * Returns nonzero iff an exact match was found.  @s records the
 * direction of the last comparison miss: -1 when the probed key was
 * smaller than @key, 1 when it was larger.  On a miss the final index
 * is adjusted so that:
 *  - in an internal node, the index names the child whose subtree
 *    covers @key (step back one slot when we overshot);
 *  - in a leaf, the index is the insertion point for @key (step
 *    forward one slot when we undershot).
 */
static int nilfs_btree_node_lookup(const struct nilfs_btree *btree,
				   const struct nilfs_btree_node *node,
				   __u64 key, int *indexp)
{
	__u64 nkey;
	int index, low, high, s;

	/* binary search */
	low = 0;
	high = nilfs_btree_node_get_nchildren(btree, node) - 1;
	index = 0;
	s = 0;
	while (low <= high) {
		index = (low + high) / 2;
		nkey = nilfs_btree_node_get_key(btree, node, index);
		if (nkey == key) {
			s = 0;
			goto out;
		} else if (nkey < key) {
			low = index + 1;
			s = -1;
		} else {
			high = index - 1;
			s = 1;
		}
	}

	/* adjust index */
	if (nilfs_btree_node_get_level(btree, node) >
	    NILFS_BTREE_LEVEL_NODE_MIN) {
		if ((s > 0) && (index > 0))
			index--;
	} else if (s < 0)
		index++;

 out:
	*indexp = index;
	return s == 0;
}
/* The root node lives inline in the bmap's union, not in a buffer. */
static inline struct nilfs_btree_node *
nilfs_btree_get_root(const struct nilfs_btree *btree)
{
	return (struct nilfs_btree_node *)btree->bt_bmap.b_u.u_data;
}
/* Node image of a non-root level, taken from the path's buffer head.
 * The caller must have populated path[level].bp_bh. */
static inline struct nilfs_btree_node *
nilfs_btree_get_nonroot_node(const struct nilfs_btree *btree,
			     const struct nilfs_btree_path *path,
			     int level)
{
	return (struct nilfs_btree_node *)path[level].bp_bh->b_data;
}
/* Node image of the sibling at @level, from path[level].bp_sib_bh. */
static inline struct nilfs_btree_node *
nilfs_btree_get_sib_node(const struct nilfs_btree *btree,
			 const struct nilfs_btree_path *path,
			 int level)
{
	return (struct nilfs_btree_node *)path[level].bp_sib_bh->b_data;
}
/* Tree height = root level + 1 (the data level counts as level 0). */
static inline int nilfs_btree_height(const struct nilfs_btree *btree)
{
	return nilfs_btree_node_get_level(btree, nilfs_btree_get_root(btree))
		+ 1;
}
  421. static inline struct nilfs_btree_node *
  422. nilfs_btree_get_node(const struct nilfs_btree *btree,
  423. const struct nilfs_btree_path *path,
  424. int level)
  425. {
  426. return (level == nilfs_btree_height(btree) - 1) ?
  427. nilfs_btree_get_root(btree) :
  428. nilfs_btree_get_nonroot_node(btree, path, level);
  429. }
/*
 * nilfs_btree_do_lookup - descend from the root to @minlevel for @key
 * @path: records bp_bh/bp_index for every level visited
 * @ptrp: if non-NULL, receives the pointer found at @minlevel
 *
 * Once an exact match is found at some level, lower levels take the
 * leftmost child (index 0).  If no match exists, the path still ends
 * at the would-be insertion point; in that case ptr may be set to
 * NILFS_BMAP_INVALID_PTR when the insertion point lies past the last
 * slot.  Returns 0 on an exact match, -ENOENT if the key is absent or
 * the tree is shorter than @minlevel, or a negative error from block
 * I/O.
 */
static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
				 struct nilfs_btree_path *path,
				 __u64 key, __u64 *ptrp, int minlevel)
{
	struct nilfs_btree_node *node;
	__u64 ptr;
	int level, index, found, ret;

	node = nilfs_btree_get_root(btree);
	level = nilfs_btree_node_get_level(btree, node);
	if ((level < minlevel) ||
	    (nilfs_btree_node_get_nchildren(btree, node) <= 0))
		return -ENOENT;

	found = nilfs_btree_node_lookup(btree, node, key, &index);
	ptr = nilfs_btree_node_get_ptr(btree, node, index);
	path[level].bp_bh = NULL;	/* root has no buffer */
	path[level].bp_index = index;

	for (level--; level >= minlevel; level--) {
		ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
		if (ret < 0)
			return ret;
		node = nilfs_btree_get_nonroot_node(btree, path, level);
		BUG_ON(level != nilfs_btree_node_get_level(btree, node));
		if (!found)
			found = nilfs_btree_node_lookup(btree, node, key,
							&index);
		else
			index = 0;
		if (index < nilfs_btree_node_nchildren_max(btree, node))
			ptr = nilfs_btree_node_get_ptr(btree, node, index);
		else {
			WARN_ON(found || level != NILFS_BTREE_LEVEL_NODE_MIN);
			/* insert */
			ptr = NILFS_BMAP_INVALID_PTR;
		}
		path[level].bp_index = index;
	}
	if (!found)
		return -ENOENT;

	if (ptrp != NULL)
		*ptrp = ptr;

	return 0;
}
/*
 * nilfs_btree_do_lookup_last - walk the rightmost path of the tree
 * @keyp: if non-NULL, receives the largest key in the tree
 * @ptrp: if non-NULL, receives the pointer of that last entry
 *
 * Follows the last child at every level down to the leaf.  Returns 0
 * on success, -ENOENT on an empty tree, or a negative error from
 * block I/O.
 */
static int nilfs_btree_do_lookup_last(const struct nilfs_btree *btree,
				      struct nilfs_btree_path *path,
				      __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;
	__u64 ptr;
	int index, level, ret;

	node = nilfs_btree_get_root(btree);
	index = nilfs_btree_node_get_nchildren(btree, node) - 1;
	if (index < 0)
		return -ENOENT;
	level = nilfs_btree_node_get_level(btree, node);
	ptr = nilfs_btree_node_get_ptr(btree, node, index);
	path[level].bp_bh = NULL;	/* root has no buffer */
	path[level].bp_index = index;

	for (level--; level > 0; level--) {
		ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
		if (ret < 0)
			return ret;
		node = nilfs_btree_get_nonroot_node(btree, path, level);
		BUG_ON(level != nilfs_btree_node_get_level(btree, node));
		index = nilfs_btree_node_get_nchildren(btree, node) - 1;
		ptr = nilfs_btree_node_get_ptr(btree, node, index);
		path[level].bp_index = index;
	}

	if (keyp != NULL)
		*keyp = nilfs_btree_node_get_key(btree, node, index);
	if (ptrp != NULL)
		*ptrp = ptr;

	return 0;
}
  503. static int nilfs_btree_lookup(const struct nilfs_bmap *bmap,
  504. __u64 key, int level, __u64 *ptrp)
  505. {
  506. struct nilfs_btree *btree;
  507. struct nilfs_btree_path *path;
  508. __u64 ptr;
  509. int ret;
  510. btree = (struct nilfs_btree *)bmap;
  511. path = nilfs_btree_alloc_path(btree);
  512. if (path == NULL)
  513. return -ENOMEM;
  514. nilfs_btree_init_path(btree, path);
  515. ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level);
  516. if (ptrp != NULL)
  517. *ptrp = ptr;
  518. nilfs_btree_clear_path(btree, path);
  519. nilfs_btree_free_path(btree, path);
  520. return ret;
  521. }
/*
 * nilfs_btree_lookup_contig - look up a run of contiguous blocks
 * @bmap: bmap (actually a struct nilfs_btree)
 * @key: first key of the run
 * @ptrp: receives the (possibly DAT-translated) address of @key's block
 * @maxblocks: upper bound on the run length to report
 *
 * Finds @key at the leaf level, then walks forward while successive
 * entries have key == @key + cnt and address == *ptrp + cnt, crossing
 * into right-sibling leaves via the parent's index when a leaf is
 * exhausted.  When the bmap uses virtual block numbers, every pointer
 * is translated through the DAT before the contiguity test.
 *
 * Returns the number of contiguous blocks found (>= 1) with *ptrp set,
 * or a negative error code.
 */
static int nilfs_btree_lookup_contig(const struct nilfs_bmap *bmap,
				     __u64 key, __u64 *ptrp, unsigned maxblocks)
{
	struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
	struct nilfs_btree_path *path;
	struct nilfs_btree_node *node;
	struct inode *dat = NULL;
	__u64 ptr, ptr2;
	sector_t blocknr;
	int level = NILFS_BTREE_LEVEL_NODE_MIN;
	int ret, cnt, index, maxlevel;

	path = nilfs_btree_alloc_path(btree);
	if (path == NULL)
		return -ENOMEM;
	nilfs_btree_init_path(btree, path);

	ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level);
	if (ret < 0)
		goto out;

	if (NILFS_BMAP_USE_VBN(bmap)) {
		/* translate virtual block number to disk block number */
		dat = nilfs_bmap_get_dat(bmap);
		ret = nilfs_dat_translate(dat, ptr, &blocknr);
		if (ret < 0)
			goto out;
		ptr = blocknr;
	}
	cnt = 1;
	if (cnt == maxblocks)
		goto end;

	maxlevel = nilfs_btree_height(btree) - 1;
	node = nilfs_btree_get_node(btree, path, level);
	index = path[level].bp_index + 1;
	for (;;) {
		/* scan forward within the current leaf */
		while (index < nilfs_btree_node_get_nchildren(btree, node)) {
			if (nilfs_btree_node_get_key(btree, node, index) !=
			    key + cnt)
				goto end;
			ptr2 = nilfs_btree_node_get_ptr(btree, node, index);
			if (dat) {
				ret = nilfs_dat_translate(dat, ptr2, &blocknr);
				if (ret < 0)
					goto out;
				ptr2 = blocknr;
			}
			if (ptr2 != ptr + cnt || ++cnt == maxblocks)
				goto end;
			index++;
			continue;
		}
		if (level == maxlevel)
			break;

		/* look-up right sibling node */
		node = nilfs_btree_get_node(btree, path, level + 1);
		index = path[level + 1].bp_index + 1;
		if (index >= nilfs_btree_node_get_nchildren(btree, node) ||
		    nilfs_btree_node_get_key(btree, node, index) != key + cnt)
			break;
		ptr2 = nilfs_btree_node_get_ptr(btree, node, index);
		path[level + 1].bp_index = index;

		/* swap the current leaf buffer for the sibling's */
		brelse(path[level].bp_bh);
		path[level].bp_bh = NULL;
		ret = nilfs_btree_get_block(btree, ptr2, &path[level].bp_bh);
		if (ret < 0)
			goto out;
		node = nilfs_btree_get_nonroot_node(btree, path, level);
		index = 0;
		path[level].bp_index = index;
	}
 end:
	*ptrp = ptr;
	ret = cnt;
 out:
	nilfs_btree_clear_path(btree, path);
	nilfs_btree_free_path(btree, path);
	return ret;
}
/*
 * nilfs_btree_promote_key - propagate a changed minimal key upward
 * @path: current path; bp_index at each level names the entry to fix
 * @level: level at which to start rewriting the separator key
 * @key: new key value
 *
 * Rewrites the separator at path[level].bp_index, dirtying the node's
 * buffer, and keeps climbing as long as the rewritten entry was the
 * leftmost one (bp_index == 0), since only then does the parent's
 * separator also change.  The root, which has no buffer head, is
 * handled separately at the end.
 */
static void nilfs_btree_promote_key(struct nilfs_btree *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 key)
{
	if (level < nilfs_btree_height(btree) - 1) {
		do {
			lock_buffer(path[level].bp_bh);
			nilfs_btree_node_set_key(
				btree,
				nilfs_btree_get_nonroot_node(
					btree, path, level),
				path[level].bp_index, key);
			if (!buffer_dirty(path[level].bp_bh))
				nilfs_btnode_mark_dirty(path[level].bp_bh);
			unlock_buffer(path[level].bp_bh);
		} while ((path[level].bp_index == 0) &&
			 (++level < nilfs_btree_height(btree) - 1));
	}

	/* root */
	if (level == nilfs_btree_height(btree) - 1) {
		nilfs_btree_node_set_key(btree,
					 nilfs_btree_get_root(btree),
					 path[level].bp_index, key);
	}
}
/*
 * nilfs_btree_do_insert - insert (*keyp, *ptrp) at this path level
 *
 * For a non-root node the buffer is locked and dirtied around the
 * insertion; if the new entry became the node's leftmost (bp_index
 * 0), the new minimal key must be promoted to the parents.  The root
 * node needs neither locking nor promotion.
 */
static void nilfs_btree_do_insert(struct nilfs_btree *btree,
				  struct nilfs_btree_path *path,
				  int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;

	if (level < nilfs_btree_height(btree) - 1) {
		lock_buffer(path[level].bp_bh);
		node = nilfs_btree_get_nonroot_node(btree, path, level);
		nilfs_btree_node_insert(btree, node, *keyp, *ptrp,
					path[level].bp_index);
		if (!buffer_dirty(path[level].bp_bh))
			nilfs_btnode_mark_dirty(path[level].bp_bh);
		unlock_buffer(path[level].bp_bh);

		if (path[level].bp_index == 0)
			nilfs_btree_promote_key(btree, path, level + 1,
						nilfs_btree_node_get_key(
							btree, node, 0));
	} else {
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_insert(btree, node, *keyp, *ptrp,
					path[level].bp_index);
	}
}
/*
 * nilfs_btree_carry_left - make room by shifting entries to the left
 * sibling, then perform the pending insert
 *
 * @n is chosen so the two nodes end up balanced (accounting for the
 * entry about to be inserted).  If the insert position falls inside
 * the range being moved (n > bp_index), one fewer entry is moved and
 * the insert point itself migrates into the left sibling (move = 1):
 * the path is retargeted to the sibling's buffer and the parent index
 * is stepped back.  Afterwards the (possibly changed) minimal key of
 * this node is promoted before the insert is done at the adjusted
 * position.
 */
static void nilfs_btree_carry_left(struct nilfs_btree *btree,
				   struct nilfs_btree_path *path,
				   int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int nchildren, lnchildren, n, move;

	lock_buffer(path[level].bp_bh);
	lock_buffer(path[level].bp_sib_bh);

	node = nilfs_btree_get_nonroot_node(btree, path, level);
	left = nilfs_btree_get_sib_node(btree, path, level);
	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	lnchildren = nilfs_btree_node_get_nchildren(btree, left);
	move = 0;

	/* balance the two nodes, leaving a slot for the new entry */
	n = (nchildren + lnchildren + 1) / 2 - lnchildren;
	if (n > path[level].bp_index) {
		/* move insert point */
		n--;
		move = 1;
	}

	nilfs_btree_node_move_left(btree, left, node, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);

	unlock_buffer(path[level].bp_bh);
	unlock_buffer(path[level].bp_sib_bh);

	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(btree, node, 0));

	if (move) {
		/* insert point moved into the left sibling */
		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index += lnchildren;
		path[level + 1].bp_index--;
	} else {
		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
		path[level].bp_index -= n;
	}

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
}
/*
 * nilfs_btree_carry_right - make room by shifting entries to the right sibling
 *
 * Mirror of nilfs_btree_carry_left(): moves surplus entries into the right
 * sibling, updates the right sibling's separator key in the parent, adjusts
 * the insertion point (switching the path to the sibling when the insert
 * position moved right), then inserts via nilfs_btree_do_insert().
 */
static void nilfs_btree_carry_right(struct nilfs_btree *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int nchildren, rnchildren, n, move;

	/* lock order: current node first, then sibling */
	lock_buffer(path[level].bp_bh);
	lock_buffer(path[level].bp_sib_bh);

	node = nilfs_btree_get_nonroot_node(btree, path, level);
	right = nilfs_btree_get_sib_node(btree, path, level);
	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	rnchildren = nilfs_btree_node_get_nchildren(btree, right);
	move = 0;

	/* number of entries to shift so both nodes end up balanced */
	n = (nchildren + rnchildren + 1) / 2 - rnchildren;
	if (n > nchildren - path[level].bp_index) {
		/* move insert point */
		n--;
		move = 1;
	}

	nilfs_btree_node_move_right(btree, node, right, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
	unlock_buffer(path[level].bp_bh);
	unlock_buffer(path[level].bp_sib_bh);

	/*
	 * Temporarily point the parent index at the right sibling so
	 * promote_key updates the sibling's separator, then restore it.
	 */
	path[level + 1].bp_index++;
	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(btree, right, 0));
	path[level + 1].bp_index--;

	if (move) {
		/* insertion point now lives in the right sibling */
		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index -=
			nilfs_btree_node_get_nchildren(btree, node);
		path[level + 1].bp_index++;
	} else {
		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
	}

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
}
  729. static void nilfs_btree_split(struct nilfs_btree *btree,
  730. struct nilfs_btree_path *path,
  731. int level, __u64 *keyp, __u64 *ptrp)
  732. {
  733. struct nilfs_btree_node *node, *right;
  734. __u64 newkey;
  735. __u64 newptr;
  736. int nchildren, n, move;
  737. lock_buffer(path[level].bp_bh);
  738. lock_buffer(path[level].bp_sib_bh);
  739. node = nilfs_btree_get_nonroot_node(btree, path, level);
  740. right = nilfs_btree_get_sib_node(btree, path, level);
  741. nchildren = nilfs_btree_node_get_nchildren(btree, node);
  742. move = 0;
  743. n = (nchildren + 1) / 2;
  744. if (n > nchildren - path[level].bp_index) {
  745. n--;
  746. move = 1;
  747. }
  748. nilfs_btree_node_move_right(btree, node, right, n);
  749. if (!buffer_dirty(path[level].bp_bh))
  750. nilfs_btnode_mark_dirty(path[level].bp_bh);
  751. if (!buffer_dirty(path[level].bp_sib_bh))
  752. nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
  753. unlock_buffer(path[level].bp_bh);
  754. unlock_buffer(path[level].bp_sib_bh);
  755. newkey = nilfs_btree_node_get_key(btree, right, 0);
  756. newptr = path[level].bp_newreq.bpr_ptr;
  757. if (move) {
  758. path[level].bp_index -=
  759. nilfs_btree_node_get_nchildren(btree, node);
  760. nilfs_btree_node_insert(btree, right, *keyp, *ptrp,
  761. path[level].bp_index);
  762. *keyp = nilfs_btree_node_get_key(btree, right, 0);
  763. *ptrp = path[level].bp_newreq.bpr_ptr;
  764. brelse(path[level].bp_bh);
  765. path[level].bp_bh = path[level].bp_sib_bh;
  766. path[level].bp_sib_bh = NULL;
  767. } else {
  768. nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
  769. *keyp = nilfs_btree_node_get_key(btree, right, 0);
  770. *ptrp = path[level].bp_newreq.bpr_ptr;
  771. brelse(path[level].bp_sib_bh);
  772. path[level].bp_sib_bh = NULL;
  773. }
  774. path[level + 1].bp_index++;
  775. }
/*
 * nilfs_btree_grow - increase the tree height by one
 *
 * Moves all children of the root into the new node prepared in
 * path[level].bp_sib_bh, raises the root's level, inserts the pending
 * entry, and returns the new child's smallest key and its allocated block
 * pointer through @keyp/@ptrp for insertion into the (now empty) root.
 */
static void nilfs_btree_grow(struct nilfs_btree *btree,
			     struct nilfs_btree_path *path,
			     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *root, *child;
	int n;

	lock_buffer(path[level].bp_sib_bh);

	root = nilfs_btree_get_root(btree);
	child = nilfs_btree_get_sib_node(btree, path, level);

	/* evacuate every root entry into the new child node */
	n = nilfs_btree_node_get_nchildren(btree, root);
	nilfs_btree_node_move_right(btree, root, child, n);
	nilfs_btree_node_set_level(btree, root, level + 1);

	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
	unlock_buffer(path[level].bp_sib_bh);

	path[level].bp_bh = path[level].bp_sib_bh;
	path[level].bp_sib_bh = NULL;

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);

	/* hand the new child's key/ptr up for insertion into the root */
	*keyp = nilfs_btree_node_get_key(btree, child, 0);
	*ptrp = path[level].bp_newreq.bpr_ptr;
}
  797. static __u64 nilfs_btree_find_near(const struct nilfs_btree *btree,
  798. const struct nilfs_btree_path *path)
  799. {
  800. struct nilfs_btree_node *node;
  801. int level;
  802. if (path == NULL)
  803. return NILFS_BMAP_INVALID_PTR;
  804. /* left sibling */
  805. level = NILFS_BTREE_LEVEL_NODE_MIN;
  806. if (path[level].bp_index > 0) {
  807. node = nilfs_btree_get_node(btree, path, level);
  808. return nilfs_btree_node_get_ptr(btree, node,
  809. path[level].bp_index - 1);
  810. }
  811. /* parent */
  812. level = NILFS_BTREE_LEVEL_NODE_MIN + 1;
  813. if (level <= nilfs_btree_height(btree) - 1) {
  814. node = nilfs_btree_get_node(btree, path, level);
  815. return nilfs_btree_node_get_ptr(btree, node,
  816. path[level].bp_index);
  817. }
  818. return NILFS_BMAP_INVALID_PTR;
  819. }
  820. static __u64 nilfs_btree_find_target_v(const struct nilfs_btree *btree,
  821. const struct nilfs_btree_path *path,
  822. __u64 key)
  823. {
  824. __u64 ptr;
  825. ptr = nilfs_bmap_find_target_seq(&btree->bt_bmap, key);
  826. if (ptr != NILFS_BMAP_INVALID_PTR)
  827. /* sequential access */
  828. return ptr;
  829. else {
  830. ptr = nilfs_btree_find_near(btree, path);
  831. if (ptr != NILFS_BMAP_INVALID_PTR)
  832. /* near */
  833. return ptr;
  834. }
  835. /* block group */
  836. return nilfs_bmap_find_target_in_group(&btree->bt_bmap);
  837. }
  838. static void nilfs_btree_set_target_v(struct nilfs_btree *btree, __u64 key,
  839. __u64 ptr)
  840. {
  841. btree->bt_bmap.b_last_allocated_key = key;
  842. btree->bt_bmap.b_last_allocated_ptr = ptr;
  843. }
/*
 * nilfs_btree_prepare_insert - reserve resources and plan an insertion
 *
 * Walks from the leaf level upward and, for each level, records in
 * path[level].bp_op the operation that will absorb the insertion:
 * plain insert when there is room, carry into a sibling with free space,
 * split into a freshly allocated node otherwise, and finally grow the
 * tree when even the root is full.  All block-pointer allocations are
 * only *prepared* here; they are committed by nilfs_btree_commit_insert()
 * or rolled back on error.  On success *levelp is the highest level whose
 * bp_op must be executed and stats->bs_nblocks counts blocks to be added.
 */
static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
				      struct nilfs_btree_path *path,
				      int *levelp, __u64 key, __u64 ptr,
				      struct nilfs_bmap_stats *stats)
{
	struct buffer_head *bh;
	struct nilfs_btree_node *node, *parent, *sib;
	__u64 sibptr;
	int pindex, level, ret;

	stats->bs_nblocks = 0;
	level = NILFS_BTREE_LEVEL_DATA;
	/* allocate a new ptr for data block */
	if (NILFS_BMAP_USE_VBN(&btree->bt_bmap))
		path[level].bp_newreq.bpr_ptr =
			nilfs_btree_find_target_v(btree, path, key);
	ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
					   &path[level].bp_newreq);
	if (ret < 0)
		goto err_out_data;

	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < nilfs_btree_height(btree) - 1;
	     level++) {
		node = nilfs_btree_get_nonroot_node(btree, path, level);
		/* room in this node: a plain insert ends the climb */
		if (nilfs_btree_node_get_nchildren(btree, node) <
		    nilfs_btree_node_nchildren_max(btree, node)) {
			path[level].bp_op = nilfs_btree_do_insert;
			stats->bs_nblocks++;
			goto out;
		}

		parent = nilfs_btree_get_node(btree, path, level + 1);
		pindex = path[level + 1].bp_index;

		/* left sibling */
		if (pindex > 0) {
			sibptr = nilfs_btree_node_get_ptr(btree, parent,
							  pindex - 1);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_child_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(btree, sib) <
			    nilfs_btree_node_nchildren_max(btree, sib)) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_carry_left;
				stats->bs_nblocks++;
				goto out;
			} else
				brelse(bh);
		}

		/* right sibling */
		if (pindex <
		    nilfs_btree_node_get_nchildren(btree, parent) - 1) {
			sibptr = nilfs_btree_node_get_ptr(btree, parent,
							  pindex + 1);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_child_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(btree, sib) <
			    nilfs_btree_node_nchildren_max(btree, sib)) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_carry_right;
				stats->bs_nblocks++;
				goto out;
			} else
				brelse(bh);
		}

		/* split: allocate a new node and keep climbing */
		path[level].bp_newreq.bpr_ptr =
			path[level - 1].bp_newreq.bpr_ptr + 1;
		ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
						   &path[level].bp_newreq);
		if (ret < 0)
			goto err_out_child_node;
		ret = nilfs_btree_get_new_block(btree,
						path[level].bp_newreq.bpr_ptr,
						&bh);
		if (ret < 0)
			goto err_out_curr_node;

		stats->bs_nblocks++;

		lock_buffer(bh);
		nilfs_btree_node_init(btree,
				      (struct nilfs_btree_node *)bh->b_data,
				      0, level, 0, NULL, NULL);
		unlock_buffer(bh);
		path[level].bp_sib_bh = bh;
		path[level].bp_op = nilfs_btree_split;
	}

	/* root */
	node = nilfs_btree_get_root(btree);
	if (nilfs_btree_node_get_nchildren(btree, node) <
	    nilfs_btree_node_nchildren_max(btree, node)) {
		path[level].bp_op = nilfs_btree_do_insert;
		stats->bs_nblocks++;
		goto out;
	}

	/* grow: full root, add one level to the tree */
	path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
	ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
					   &path[level].bp_newreq);
	if (ret < 0)
		goto err_out_child_node;
	ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr,
					&bh);
	if (ret < 0)
		goto err_out_curr_node;

	lock_buffer(bh);
	nilfs_btree_node_init(btree, (struct nilfs_btree_node *)bh->b_data,
			      0, level, 0, NULL, NULL);
	unlock_buffer(bh);
	path[level].bp_sib_bh = bh;
	path[level].bp_op = nilfs_btree_grow;

	level++;
	path[level].bp_op = nilfs_btree_do_insert;

	/* a newly-created node block and a data block are added */
	stats->bs_nblocks += 2;

	/* success */
 out:
	*levelp = level;
	return ret;

	/* error */
 err_out_curr_node:
	nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq);
 err_out_child_node:
	/* undo every node allocation below the failing level */
	for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
		nilfs_btnode_delete(path[level].bp_sib_bh);
		nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap,
					   &path[level].bp_newreq);
	}

	/* level == NILFS_BTREE_LEVEL_DATA here: release the data ptr too */
	nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq);
 err_out_data:
	*levelp = level;
	stats->bs_nblocks = 0;
	return ret;
}
/*
 * nilfs_btree_commit_insert - execute a prepared insertion
 *
 * Commits the pointer allocations prepared by nilfs_btree_prepare_insert()
 * and runs the per-level bp_op handlers from the leaf up to @maxlevel.
 */
static void nilfs_btree_commit_insert(struct nilfs_btree *btree,
				      struct nilfs_btree_path *path,
				      int maxlevel, __u64 key, __u64 ptr)
{
	int level;

	/* @ptr smuggles the data block's buffer_head pointer as a __u64 */
	set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
	ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr;
	if (NILFS_BMAP_USE_VBN(&btree->bt_bmap))
		nilfs_btree_set_target_v(btree, key, ptr);

	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
		/* commit the allocation made for the level below, then act */
		nilfs_bmap_commit_alloc_ptr(&btree->bt_bmap,
					    &path[level - 1].bp_newreq);
		path[level].bp_op(btree, path, level, &key, &ptr);
	}

	if (!nilfs_bmap_dirty(&btree->bt_bmap))
		nilfs_bmap_set_dirty(&btree->bt_bmap);
}
  995. static int nilfs_btree_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
  996. {
  997. struct nilfs_btree *btree;
  998. struct nilfs_btree_path *path;
  999. struct nilfs_bmap_stats stats;
  1000. int level, ret;
  1001. btree = (struct nilfs_btree *)bmap;
  1002. path = nilfs_btree_alloc_path(btree);
  1003. if (path == NULL)
  1004. return -ENOMEM;
  1005. nilfs_btree_init_path(btree, path);
  1006. ret = nilfs_btree_do_lookup(btree, path, key, NULL,
  1007. NILFS_BTREE_LEVEL_NODE_MIN);
  1008. if (ret != -ENOENT) {
  1009. if (ret == 0)
  1010. ret = -EEXIST;
  1011. goto out;
  1012. }
  1013. ret = nilfs_btree_prepare_insert(btree, path, &level, key, ptr, &stats);
  1014. if (ret < 0)
  1015. goto out;
  1016. nilfs_btree_commit_insert(btree, path, level, key, ptr);
  1017. nilfs_bmap_add_blocks(bmap, stats.bs_nblocks);
  1018. out:
  1019. nilfs_btree_clear_path(btree, path);
  1020. nilfs_btree_free_path(btree, path);
  1021. return ret;
  1022. }
/*
 * nilfs_btree_do_delete - remove the entry at path[level].bp_index
 *
 * Non-root nodes are modified under their buffer lock and marked dirty;
 * if the removed entry was the node's smallest, the parent's separator
 * key is updated.  The root is modified in place.
 */
static void nilfs_btree_do_delete(struct nilfs_btree *btree,
				  struct nilfs_btree_path *path,
				  int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;

	if (level < nilfs_btree_height(btree) - 1) {
		lock_buffer(path[level].bp_bh);
		node = nilfs_btree_get_nonroot_node(btree, path, level);
		nilfs_btree_node_delete(btree, node, keyp, ptrp,
					path[level].bp_index);
		if (!buffer_dirty(path[level].bp_bh))
			nilfs_btnode_mark_dirty(path[level].bp_bh);
		unlock_buffer(path[level].bp_bh);

		/* the node's smallest key changed: fix the parent's key */
		if (path[level].bp_index == 0)
			nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(btree, node, 0));
	} else {
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_delete(btree, node, keyp, ptrp,
					path[level].bp_index);
	}
}
/*
 * nilfs_btree_borrow_left - delete, then rebalance from the left sibling
 *
 * After the deletion leaves the node under-full, moves entries from the
 * left sibling so both nodes hold about half of their combined children,
 * updates the parent's separator key, and adjusts the insertion index.
 */
static void nilfs_btree_borrow_left(struct nilfs_btree *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int nchildren, lnchildren, n;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	/* lock order: current node first, then sibling */
	lock_buffer(path[level].bp_bh);
	lock_buffer(path[level].bp_sib_bh);

	node = nilfs_btree_get_nonroot_node(btree, path, level);
	left = nilfs_btree_get_sib_node(btree, path, level);
	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	lnchildren = nilfs_btree_node_get_nchildren(btree, left);

	/* even out both nodes */
	n = (nchildren + lnchildren) / 2 - nchildren;

	nilfs_btree_node_move_right(btree, left, node, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
	unlock_buffer(path[level].bp_bh);
	unlock_buffer(path[level].bp_sib_bh);

	/* node's smallest key changed; propagate to the parent */
	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(btree, node, 0));

	brelse(path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
	path[level].bp_index += n;
}
/*
 * nilfs_btree_borrow_right - delete, then rebalance from the right sibling
 *
 * Mirror of nilfs_btree_borrow_left(): pulls entries in from the right
 * sibling after the deletion and updates the sibling's separator key in
 * the parent.
 */
static void nilfs_btree_borrow_right(struct nilfs_btree *btree,
				     struct nilfs_btree_path *path,
				     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int nchildren, rnchildren, n;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	/* lock order: current node first, then sibling */
	lock_buffer(path[level].bp_bh);
	lock_buffer(path[level].bp_sib_bh);

	node = nilfs_btree_get_nonroot_node(btree, path, level);
	right = nilfs_btree_get_sib_node(btree, path, level);
	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	rnchildren = nilfs_btree_node_get_nchildren(btree, right);

	/* even out both nodes */
	n = (nchildren + rnchildren) / 2 - nchildren;

	nilfs_btree_node_move_left(btree, node, right, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
	unlock_buffer(path[level].bp_bh);
	unlock_buffer(path[level].bp_sib_bh);

	/*
	 * Temporarily point the parent index at the right sibling so
	 * promote_key updates the sibling's separator, then restore it.
	 */
	path[level + 1].bp_index++;
	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(btree, right, 0));
	path[level + 1].bp_index--;

	brelse(path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
}
/*
 * nilfs_btree_concat_left - delete, then merge the node into its left sibling
 *
 * All remaining entries are moved into the left sibling and the emptied
 * node's buffer is deleted; the path is switched to the sibling with the
 * index shifted by the sibling's (post-merge) leading entries.
 */
static void nilfs_btree_concat_left(struct nilfs_btree *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int n;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	/* lock order: current node first, then sibling */
	lock_buffer(path[level].bp_bh);
	lock_buffer(path[level].bp_sib_bh);

	node = nilfs_btree_get_nonroot_node(btree, path, level);
	left = nilfs_btree_get_sib_node(btree, path, level);

	/* drain the whole node into the left sibling */
	n = nilfs_btree_node_get_nchildren(btree, node);
	nilfs_btree_node_move_left(btree, left, node, n);

	if (!buffer_dirty(path[level].bp_sib_bh))
		nilfs_btnode_mark_dirty(path[level].bp_sib_bh);

	unlock_buffer(path[level].bp_bh);
	unlock_buffer(path[level].bp_sib_bh);

	/* the emptied node block is no longer needed */
	nilfs_btnode_delete(path[level].bp_bh);
	path[level].bp_bh = path[level].bp_sib_bh;
	path[level].bp_sib_bh = NULL;
	path[level].bp_index += nilfs_btree_node_get_nchildren(btree, left);
}
/*
 * nilfs_btree_concat_right - delete, then merge the right sibling into node
 *
 * All of the right sibling's entries are moved into the current node and
 * the emptied sibling's buffer is deleted; the parent index is advanced so
 * the caller (commit_delete) removes the sibling's entry from the parent.
 */
static void nilfs_btree_concat_right(struct nilfs_btree *btree,
				     struct nilfs_btree_path *path,
				     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int n;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	/* lock order: current node first, then sibling */
	lock_buffer(path[level].bp_bh);
	lock_buffer(path[level].bp_sib_bh);

	node = nilfs_btree_get_nonroot_node(btree, path, level);
	right = nilfs_btree_get_sib_node(btree, path, level);

	/* drain the whole right sibling into this node */
	n = nilfs_btree_node_get_nchildren(btree, right);
	nilfs_btree_node_move_left(btree, node, right, n);

	if (!buffer_dirty(path[level].bp_bh))
		nilfs_btnode_mark_dirty(path[level].bp_bh);

	unlock_buffer(path[level].bp_bh);
	unlock_buffer(path[level].bp_sib_bh);

	/* the emptied sibling block is no longer needed */
	nilfs_btnode_delete(path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
	path[level + 1].bp_index++;
}
/*
 * nilfs_btree_shrink - decrease the tree height by one
 *
 * Used when the root has a single child: after the deletion, the child's
 * remaining entries are pulled up into the root, the root's level is
 * lowered, and the now-empty child block is deleted.
 */
static void nilfs_btree_shrink(struct nilfs_btree *btree,
			       struct nilfs_btree_path *path,
			       int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *root, *child;
	int n;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	lock_buffer(path[level].bp_bh);
	root = nilfs_btree_get_root(btree);
	child = nilfs_btree_get_nonroot_node(btree, path, level);

	/* drop the root's single entry, then absorb the child's entries */
	nilfs_btree_node_delete(btree, root, NULL, NULL, 0);
	nilfs_btree_node_set_level(btree, root, level);
	n = nilfs_btree_node_get_nchildren(btree, child);
	nilfs_btree_node_move_left(btree, root, child, n);
	unlock_buffer(path[level].bp_bh);

	/* the absorbed child block is no longer needed */
	nilfs_btnode_delete(path[level].bp_bh);
	path[level].bp_bh = NULL;
}
/*
 * nilfs_btree_prepare_delete - reserve resources and plan a deletion
 *
 * Walks from the leaf level upward and records in path[level].bp_op how
 * the deletion propagates: a plain delete when the node stays above the
 * minimum fill, borrow from a sibling that has spare entries, concatenate
 * with a sibling otherwise, and shrink the tree when the root is left
 * with a single small child.  Block-pointer disposals are only *prepared*
 * here; they are committed by nilfs_btree_commit_delete() or rolled back
 * on error.  On success *levelp is the highest level whose bp_op must be
 * executed and stats->bs_nblocks counts blocks to be released.
 */
static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
				      struct nilfs_btree_path *path,
				      int *levelp,
				      struct nilfs_bmap_stats *stats)
{
	struct buffer_head *bh;
	struct nilfs_btree_node *node, *parent, *sib;
	__u64 sibptr;
	int pindex, level, ret;

	ret = 0;
	stats->bs_nblocks = 0;
	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < nilfs_btree_height(btree) - 1;
	     level++) {
		node = nilfs_btree_get_nonroot_node(btree, path, level);
		/* prepare disposal of the ptr that will be removed here */
		path[level].bp_oldreq.bpr_ptr =
			nilfs_btree_node_get_ptr(btree, node,
						 path[level].bp_index);
		ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
						 &path[level].bp_oldreq);
		if (ret < 0)
			goto err_out_child_node;

		/* node stays above minimum fill: plain delete ends here */
		if (nilfs_btree_node_get_nchildren(btree, node) >
		    nilfs_btree_node_nchildren_min(btree, node)) {
			path[level].bp_op = nilfs_btree_do_delete;
			stats->bs_nblocks++;
			goto out;
		}

		parent = nilfs_btree_get_node(btree, path, level + 1);
		pindex = path[level + 1].bp_index;

		if (pindex > 0) {
			/* left sibling */
			sibptr = nilfs_btree_node_get_ptr(btree, parent,
							  pindex - 1);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_curr_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(btree, sib) >
			    nilfs_btree_node_nchildren_min(btree, sib)) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_borrow_left;
				stats->bs_nblocks++;
				goto out;
			} else {
				/* merge removes a node: keep climbing */
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_concat_left;
				stats->bs_nblocks++;
				/* continue; */
			}
		} else if (pindex <
			   nilfs_btree_node_get_nchildren(btree, parent) - 1) {
			/* right sibling */
			sibptr = nilfs_btree_node_get_ptr(btree, parent,
							  pindex + 1);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_curr_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(btree, sib) >
			    nilfs_btree_node_nchildren_min(btree, sib)) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_borrow_right;
				stats->bs_nblocks++;
				goto out;
			} else {
				/* merge removes a node: keep climbing */
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_concat_right;
				stats->bs_nblocks++;
				/* continue; */
			}
		} else {
			/* no siblings */
			/* the only child of the root node */
			WARN_ON(level != nilfs_btree_height(btree) - 2);
			if (nilfs_btree_node_get_nchildren(btree, node) - 1 <=
			    NILFS_BTREE_ROOT_NCHILDREN_MAX) {
				/* remaining entries fit into the root */
				path[level].bp_op = nilfs_btree_shrink;
				stats->bs_nblocks += 2;
			} else {
				path[level].bp_op = nilfs_btree_do_delete;
				stats->bs_nblocks++;
			}

			goto out;

		}
	}

	/* the deletion reached the root node */
	node = nilfs_btree_get_root(btree);
	path[level].bp_oldreq.bpr_ptr =
		nilfs_btree_node_get_ptr(btree, node, path[level].bp_index);

	ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
					 &path[level].bp_oldreq);
	if (ret < 0)
		goto err_out_child_node;

	/* child of the root node is deleted */
	path[level].bp_op = nilfs_btree_do_delete;
	stats->bs_nblocks++;

	/* success */
 out:
	*levelp = level;
	return ret;

	/* error */
 err_out_curr_node:
	nilfs_bmap_abort_end_ptr(&btree->bt_bmap, &path[level].bp_oldreq);
 err_out_child_node:
	/* undo every disposal prepared below the failing level */
	for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) {
		brelse(path[level].bp_sib_bh);
		nilfs_bmap_abort_end_ptr(&btree->bt_bmap,
					 &path[level].bp_oldreq);
	}
	*levelp = level;
	stats->bs_nblocks = 0;
	return ret;
}
/*
 * nilfs_btree_commit_delete - execute a prepared deletion
 *
 * Commits the pointer disposals prepared by nilfs_btree_prepare_delete()
 * and runs the per-level bp_op handlers from the leaf up to @maxlevel.
 */
static void nilfs_btree_commit_delete(struct nilfs_btree *btree,
				      struct nilfs_btree_path *path,
				      int maxlevel)
{
	int level;

	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
		nilfs_bmap_commit_end_ptr(&btree->bt_bmap,
					  &path[level].bp_oldreq);
		path[level].bp_op(btree, path, level, NULL, NULL);
	}

	if (!nilfs_bmap_dirty(&btree->bt_bmap))
		nilfs_bmap_set_dirty(&btree->bt_bmap);
}
  1287. static int nilfs_btree_delete(struct nilfs_bmap *bmap, __u64 key)
  1288. {
  1289. struct nilfs_btree *btree;
  1290. struct nilfs_btree_path *path;
  1291. struct nilfs_bmap_stats stats;
  1292. int level, ret;
  1293. btree = (struct nilfs_btree *)bmap;
  1294. path = nilfs_btree_alloc_path(btree);
  1295. if (path == NULL)
  1296. return -ENOMEM;
  1297. nilfs_btree_init_path(btree, path);
  1298. ret = nilfs_btree_do_lookup(btree, path, key, NULL,
  1299. NILFS_BTREE_LEVEL_NODE_MIN);
  1300. if (ret < 0)
  1301. goto out;
  1302. ret = nilfs_btree_prepare_delete(btree, path, &level, &stats);
  1303. if (ret < 0)
  1304. goto out;
  1305. nilfs_btree_commit_delete(btree, path, level);
  1306. nilfs_bmap_sub_blocks(bmap, stats.bs_nblocks);
  1307. out:
  1308. nilfs_btree_clear_path(btree, path);
  1309. nilfs_btree_free_path(btree, path);
  1310. return ret;
  1311. }
  1312. static int nilfs_btree_last_key(const struct nilfs_bmap *bmap, __u64 *keyp)
  1313. {
  1314. struct nilfs_btree *btree;
  1315. struct nilfs_btree_path *path;
  1316. int ret;
  1317. btree = (struct nilfs_btree *)bmap;
  1318. path = nilfs_btree_alloc_path(btree);
  1319. if (path == NULL)
  1320. return -ENOMEM;
  1321. nilfs_btree_init_path(btree, path);
  1322. ret = nilfs_btree_do_lookup_last(btree, path, keyp, NULL);
  1323. nilfs_btree_clear_path(btree, path);
  1324. nilfs_btree_free_path(btree, path);
  1325. return ret;
  1326. }
/*
 * nilfs_btree_check_delete - check convertibility back to a direct mapping
 *
 * Returns nonzero when deleting @key would leave the tree small enough to
 * be converted: @key is the current largest key, the second-largest key is
 * below NILFS_BMAP_LARGE_LOW, and the leaf entries live in the root
 * (height 2) or in a single child node (height 3).  Returns 0 otherwise,
 * or a negative error code if the child block cannot be read.
 */
static int nilfs_btree_check_delete(struct nilfs_bmap *bmap, __u64 key)
{
	struct buffer_head *bh;
	struct nilfs_btree *btree;
	struct nilfs_btree_node *root, *node;
	__u64 maxkey, nextmaxkey;
	__u64 ptr;
	int nchildren, ret;

	btree = (struct nilfs_btree *)bmap;
	root = nilfs_btree_get_root(btree);
	switch (nilfs_btree_height(btree)) {
	case 2:
		/* leaves are in the root itself */
		bh = NULL;
		node = root;
		break;
	case 3:
		/* more than one child: tree is too big to convert */
		nchildren = nilfs_btree_node_get_nchildren(btree, root);
		if (nchildren > 1)
			return 0;
		ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1);
		ret = nilfs_btree_get_block(btree, ptr, &bh);
		if (ret < 0)
			return ret;
		node = (struct nilfs_btree_node *)bh->b_data;
		break;
	default:
		return 0;
	}

	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	maxkey = nilfs_btree_node_get_key(btree, node, nchildren - 1);
	nextmaxkey = (nchildren > 1) ?
		nilfs_btree_node_get_key(btree, node, nchildren - 2) : 0;
	if (bh != NULL)
		brelse(bh);

	return (maxkey == key) && (nextmaxkey < NILFS_BMAP_LARGE_LOW);
}
/*
 * nilfs_btree_gather_data - copy out leaf key/ptr pairs for conversion
 *
 * Fills @keys/@ptrs with up to @nitems entries taken from the leaf node
 * (the root at height 2, the root's single child at height 3) and returns
 * the number copied.  Returns -EINVAL for taller trees and a negative
 * error code if the child block cannot be read.
 */
static int nilfs_btree_gather_data(struct nilfs_bmap *bmap,
				   __u64 *keys, __u64 *ptrs, int nitems)
{
	struct buffer_head *bh;
	struct nilfs_btree *btree;
	struct nilfs_btree_node *node, *root;
	__le64 *dkeys;
	__le64 *dptrs;
	__u64 ptr;
	int nchildren, i, ret;

	btree = (struct nilfs_btree *)bmap;
	root = nilfs_btree_get_root(btree);
	switch (nilfs_btree_height(btree)) {
	case 2:
		bh = NULL;
		node = root;
		break;
	case 3:
		/* caller is expected to have shrunk the tree to one child */
		nchildren = nilfs_btree_node_get_nchildren(btree, root);
		WARN_ON(nchildren > 1);
		ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1);
		ret = nilfs_btree_get_block(btree, ptr, &bh);
		if (ret < 0)
			return ret;
		node = (struct nilfs_btree_node *)bh->b_data;
		break;
	default:
		node = NULL;
		return -EINVAL;
	}

	nchildren = nilfs_btree_node_get_nchildren(btree, node);
	if (nchildren < nitems)
		nitems = nchildren;
	dkeys = nilfs_btree_node_dkeys(btree, node);
	dptrs = nilfs_btree_node_dptrs(btree, node);
	/* convert from on-disk little-endian to host order */
	for (i = 0; i < nitems; i++) {
		keys[i] = nilfs_bmap_dkey_to_key(dkeys[i]);
		ptrs[i] = nilfs_bmap_dptr_to_ptr(dptrs[i]);
	}

	if (bh != NULL)
		brelse(bh);

	return nitems;
}
/*
 * nilfs_btree_prepare_convert_and_insert - reserve blocks for conversion
 *
 * Prepares the pointer allocation for the new data block (@dreq) and, when
 * a child node is needed (@nreq non-NULL), a second allocation plus a new
 * node buffer returned through *bhp.  stats->bs_nblocks counts blocks to
 * be added.  On error every prepared allocation is rolled back.
 */
static int
nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
				       union nilfs_bmap_ptr_req *dreq,
				       union nilfs_bmap_ptr_req *nreq,
				       struct buffer_head **bhp,
				       struct nilfs_bmap_stats *stats)
{
	struct buffer_head *bh;
	struct nilfs_btree *btree;
	int ret;

	btree = (struct nilfs_btree *)bmap;
	stats->bs_nblocks = 0;

	/* for data */
	/* cannot find near ptr */
	if (NILFS_BMAP_USE_VBN(bmap))
		dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key);

	ret = nilfs_bmap_prepare_alloc_ptr(bmap, dreq);
	if (ret < 0)
		return ret;

	*bhp = NULL;
	stats->bs_nblocks++;
	if (nreq != NULL) {
		/* conversion also needs a child node block */
		nreq->bpr_ptr = dreq->bpr_ptr + 1;
		ret = nilfs_bmap_prepare_alloc_ptr(bmap, nreq);
		if (ret < 0)
			goto err_out_dreq;

		ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh);
		if (ret < 0)
			goto err_out_nreq;

		*bhp = bh;
		stats->bs_nblocks++;
	}

	/* success */
	return 0;

	/* error */
 err_out_nreq:
	nilfs_bmap_abort_alloc_ptr(bmap, nreq);
 err_out_dreq:
	nilfs_bmap_abort_alloc_ptr(bmap, dreq);
	stats->bs_nblocks = 0;
	return ret;

}
/*
 * nilfs_btree_commit_convert_and_insert - build the B-tree and insert
 *
 * Commits the allocations prepared earlier, reinitializes the bmap as a
 * B-tree, and populates it with the @n gathered @keys/@ptrs plus the new
 * (@key, dreq->bpr_ptr) entry.  With @nreq the entries go into a level-1
 * child under a 2-level root; without it they fit in the root directly.
 */
static void
nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
				      __u64 key, __u64 ptr,
				      const __u64 *keys, const __u64 *ptrs,
				      int n,
				      union nilfs_bmap_ptr_req *dreq,
				      union nilfs_bmap_ptr_req *nreq,
				      struct buffer_head *bh)
{
	struct nilfs_btree *btree;
	struct nilfs_btree_node *node;
	__u64 tmpptr;

	/* free resources */
	if (bmap->b_ops->bop_clear != NULL)
		bmap->b_ops->bop_clear(bmap);

	/* ptr must be a pointer to a buffer head. */
	set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));

	/* convert and insert */
	btree = (struct nilfs_btree *)bmap;
	nilfs_btree_init(bmap);
	if (nreq != NULL) {
		nilfs_bmap_commit_alloc_ptr(bmap, dreq);
		nilfs_bmap_commit_alloc_ptr(bmap, nreq);

		/* create child node at level 1 */
		lock_buffer(bh);
		node = (struct nilfs_btree_node *)bh->b_data;
		nilfs_btree_node_init(btree, node, 0, 1, n, keys, ptrs);
		nilfs_btree_node_insert(btree, node,
					key, dreq->bpr_ptr, n);
		if (!buffer_dirty(bh))
			nilfs_btnode_mark_dirty(bh);
		if (!nilfs_bmap_dirty(bmap))
			nilfs_bmap_set_dirty(bmap);

		unlock_buffer(bh);
		brelse(bh);

		/* create root node at level 2 */
		node = nilfs_btree_get_root(btree);
		tmpptr = nreq->bpr_ptr;
		nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT,
				      2, 1, &keys[0], &tmpptr);
	} else {
		nilfs_bmap_commit_alloc_ptr(bmap, dreq);

		/* create root node at level 1 */
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT,
				      1, n, keys, ptrs);
		nilfs_btree_node_insert(btree, node,
					key, dreq->bpr_ptr, n);
		if (!nilfs_bmap_dirty(bmap))
			nilfs_bmap_set_dirty(bmap);
	}

	/* remember the last allocation for the sequential-access hint */
	if (NILFS_BMAP_USE_VBN(bmap))
		nilfs_btree_set_target_v(btree, key, dreq->bpr_ptr);
}
/**
 * nilfs_btree_convert_and_insert - convert a bmap to btree form and insert
 *	a new entry
 * @bmap: bmap to convert
 * @key: key of the new entry
 * @ptr: pointer of the new entry
 * @keys: array of keys from the previous mapping
 * @ptrs: array of pointers from the previous mapping
 * @n: number of entries in @keys and @ptrs
 */
  1511. int nilfs_btree_convert_and_insert(struct nilfs_bmap *bmap,
  1512. __u64 key, __u64 ptr,
  1513. const __u64 *keys, const __u64 *ptrs, int n)
  1514. {
  1515. struct buffer_head *bh;
  1516. union nilfs_bmap_ptr_req dreq, nreq, *di, *ni;
  1517. struct nilfs_bmap_stats stats;
  1518. int ret;
  1519. if (n + 1 <= NILFS_BTREE_ROOT_NCHILDREN_MAX) {
  1520. di = &dreq;
  1521. ni = NULL;
  1522. } else if ((n + 1) <= NILFS_BTREE_NODE_NCHILDREN_MAX(
  1523. 1 << bmap->b_inode->i_blkbits)) {
  1524. di = &dreq;
  1525. ni = &nreq;
  1526. } else {
  1527. di = NULL;
  1528. ni = NULL;
  1529. BUG();
  1530. }
  1531. ret = nilfs_btree_prepare_convert_and_insert(bmap, key, di, ni, &bh,
  1532. &stats);
  1533. if (ret < 0)
  1534. return ret;
  1535. nilfs_btree_commit_convert_and_insert(bmap, key, ptr, keys, ptrs, n,
  1536. di, ni, bh);
  1537. nilfs_bmap_add_blocks(bmap, stats.bs_nblocks);
  1538. return 0;
  1539. }
  1540. static int nilfs_btree_propagate_p(struct nilfs_btree *btree,
  1541. struct nilfs_btree_path *path,
  1542. int level,
  1543. struct buffer_head *bh)
  1544. {
  1545. while ((++level < nilfs_btree_height(btree) - 1) &&
  1546. !buffer_dirty(path[level].bp_bh))
  1547. nilfs_btnode_mark_dirty(path[level].bp_bh);
  1548. return 0;
  1549. }
/*
 * nilfs_btree_prepare_update_v - prepare re-assignment of the virtual
 * block pointer of the node at @level of @path
 *
 * Reads the node's current pointer from its parent, requests an updated
 * pointer through the bmap layer, and (for buffers held in the btnode
 * cache) prepares re-keying the cached buffer from the old pointer to the
 * new one.  On failure everything prepared so far is rolled back.
 */
static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
					struct nilfs_btree_path *path,
					int level)
{
	struct nilfs_btree_node *parent;
	int ret;

	parent = nilfs_btree_get_node(btree, path, level + 1);
	path[level].bp_oldreq.bpr_ptr =
		nilfs_btree_node_get_ptr(btree, parent,
					 path[level + 1].bp_index);
	/* seed the request with old + 1; presumably the bmap layer assigns
	   the final value — confirm in nilfs_bmap_prepare_update_v() */
	path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1;
	ret = nilfs_bmap_prepare_update_v(&btree->bt_bmap,
					  &path[level].bp_oldreq,
					  &path[level].bp_newreq);
	if (ret < 0)
		return ret;

	if (buffer_nilfs_node(path[level].bp_bh)) {
		/* btnode-cache buffers are keyed by their pointer value, so
		 * the cache entry must be re-keyed along with the update */
		path[level].bp_ctxt.oldkey = path[level].bp_oldreq.bpr_ptr;
		path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
		path[level].bp_ctxt.bh = path[level].bp_bh;
		ret = nilfs_btnode_prepare_change_key(
			&NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
			&path[level].bp_ctxt);
		if (ret < 0) {
			/* roll back the pointer update reservation */
			nilfs_bmap_abort_update_v(&btree->bt_bmap,
						  &path[level].bp_oldreq,
						  &path[level].bp_newreq);
			return ret;
		}
	}

	return 0;
}
/*
 * nilfs_btree_commit_update_v - commit the pointer update prepared by
 * nilfs_btree_prepare_update_v() for the node at @level of @path
 *
 * Commits the bmap-layer update, finalizes the btnode-cache re-keying
 * (which may hand back a different buffer head), marks the buffer
 * volatile, and writes the new pointer into the parent node.
 */
static void nilfs_btree_commit_update_v(struct nilfs_btree *btree,
					struct nilfs_btree_path *path,
					int level)
{
	struct nilfs_btree_node *parent;

	nilfs_bmap_commit_update_v(&btree->bt_bmap,
				   &path[level].bp_oldreq,
				   &path[level].bp_newreq);

	if (buffer_nilfs_node(path[level].bp_bh)) {
		nilfs_btnode_commit_change_key(
			&NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
			&path[level].bp_ctxt);
		/* the re-keyed buffer may differ; track the new one */
		path[level].bp_bh = path[level].bp_ctxt.bh;
	}
	set_buffer_nilfs_volatile(path[level].bp_bh);

	parent = nilfs_btree_get_node(btree, path, level + 1);
	nilfs_btree_node_set_ptr(btree, parent, path[level + 1].bp_index,
				 path[level].bp_newreq.bpr_ptr);
}
/*
 * nilfs_btree_abort_update_v - undo the pointer update prepared by
 * nilfs_btree_prepare_update_v() for the node at @level of @path
 *
 * Mirrors the prepare step: aborts the bmap-layer update and, for
 * btnode-cache buffers, cancels the pending cache re-keying.
 */
static void nilfs_btree_abort_update_v(struct nilfs_btree *btree,
				       struct nilfs_btree_path *path,
				       int level)
{
	nilfs_bmap_abort_update_v(&btree->bt_bmap,
				  &path[level].bp_oldreq,
				  &path[level].bp_newreq);
	if (buffer_nilfs_node(path[level].bp_bh))
		nilfs_btnode_abort_change_key(
			&NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
			&path[level].bp_ctxt);
}
/*
 * nilfs_btree_prepare_propagate_v - prepare pointer updates for the run of
 * path levels starting at @minlevel
 * @maxlevelp: on success, set to the highest level that was prepared
 *
 * The bottom level is prepared only if its buffer is not volatile (a
 * volatile buffer was already handled earlier).  Higher levels are
 * prepared upward until the first already-dirty buffer or the level below
 * the root is reached.  On failure all levels prepared so far are aborted.
 */
static int nilfs_btree_prepare_propagate_v(struct nilfs_btree *btree,
					   struct nilfs_btree_path *path,
					   int minlevel,
					   int *maxlevelp)
{
	int level, ret;

	level = minlevel;
	if (!buffer_nilfs_volatile(path[level].bp_bh)) {
		ret = nilfs_btree_prepare_update_v(btree, path, level);
		if (ret < 0)
			return ret;
	}
	while ((++level < nilfs_btree_height(btree) - 1) &&
	       !buffer_dirty(path[level].bp_bh)) {
		/* levels above the bottom are never volatile here */
		WARN_ON(buffer_nilfs_volatile(path[level].bp_bh));
		ret = nilfs_btree_prepare_update_v(btree, path, level);
		if (ret < 0)
			goto out;
	}

	/* success */
	*maxlevelp = level - 1;
	return 0;

	/* error */
 out:
	/* abort the fully prepared levels above minlevel ... */
	while (--level > minlevel)
		nilfs_btree_abort_update_v(btree, path, level);
	/* ... and minlevel itself only if it was actually prepared */
	if (!buffer_nilfs_volatile(path[level].bp_bh))
		nilfs_btree_abort_update_v(btree, path, level);
	return ret;
}
  1643. static void nilfs_btree_commit_propagate_v(struct nilfs_btree *btree,
  1644. struct nilfs_btree_path *path,
  1645. int minlevel,
  1646. int maxlevel,
  1647. struct buffer_head *bh)
  1648. {
  1649. int level;
  1650. if (!buffer_nilfs_volatile(path[minlevel].bp_bh))
  1651. nilfs_btree_commit_update_v(btree, path, minlevel);
  1652. for (level = minlevel + 1; level <= maxlevel; level++)
  1653. nilfs_btree_commit_update_v(btree, path, level);
  1654. }
/*
 * nilfs_btree_propagate_v - propagate dirtiness of @bh up the btree using
 * virtual block numbers
 *
 * Prepares pointer updates from @level upward, marks the block dirty in
 * the bmap layer when the bottom buffer turned out to be volatile, then
 * commits all prepared levels.  The extra reference taken on @bh is
 * dropped on every exit path.
 */
static int nilfs_btree_propagate_v(struct nilfs_btree *btree,
				   struct nilfs_btree_path *path,
				   int level,
				   struct buffer_head *bh)
{
	int maxlevel, ret;
	struct nilfs_btree_node *parent;
	__u64 ptr;

	get_bh(bh);	/* pin bh while it sits in path[level].bp_bh */
	path[level].bp_bh = bh;
	ret = nilfs_btree_prepare_propagate_v(btree, path, level, &maxlevel);
	if (ret < 0)
		goto out;

	if (buffer_nilfs_volatile(path[level].bp_bh)) {
		/* volatile buffer: no new pointer was prepared for it, so
		 * mark its current pointer dirty instead */
		parent = nilfs_btree_get_node(btree, path, level + 1);
		ptr = nilfs_btree_node_get_ptr(btree, parent,
					       path[level + 1].bp_index);
		ret = nilfs_bmap_mark_dirty(&btree->bt_bmap, ptr);
		if (ret < 0)
			goto out;
	}

	nilfs_btree_commit_propagate_v(btree, path, level, maxlevel, bh);

 out:
	brelse(path[level].bp_bh);
	path[level].bp_bh = NULL;
	return ret;
}
/*
 * nilfs_btree_propagate - bop_propagate operation: propagate dirtiness of
 * buffer @bh to the btree ancestors that reference it
 *
 * Looks up the btree path for the buffer's key, then dispatches to the
 * virtual-block (v) or physical-block (p) variant depending on the bmap.
 * Returns 0 on success or a negative error code.
 */
static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
				 struct buffer_head *bh)
{
	struct nilfs_btree *btree;
	struct nilfs_btree_path *path;
	struct nilfs_btree_node *node;
	__u64 key;
	int level, ret;

	WARN_ON(!buffer_dirty(bh));

	btree = (struct nilfs_btree *)bmap;
	path = nilfs_btree_alloc_path(btree);
	if (path == NULL)
		return -ENOMEM;
	nilfs_btree_init_path(btree, path);

	if (buffer_nilfs_node(bh)) {
		/* btree node buffer: key and level come from the node header */
		node = (struct nilfs_btree_node *)bh->b_data;
		key = nilfs_btree_node_get_key(btree, node, 0);
		level = nilfs_btree_node_get_level(btree, node);
	} else {
		/* data buffer: key is its file block offset */
		key = nilfs_bmap_data_get_key(bmap, bh);
		level = NILFS_BTREE_LEVEL_DATA;
	}

	ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1);
	if (ret < 0) {
		/* a dirty buffer must be reachable; ENOENT indicates
		 * btree corruption */
		if (unlikely(ret == -ENOENT))
			printk(KERN_CRIT "%s: key = %llu, level == %d\n",
			       __func__, (unsigned long long)key, level);
		goto out;
	}

	ret = NILFS_BMAP_USE_VBN(bmap) ?
		nilfs_btree_propagate_v(btree, path, level, bh) :
		nilfs_btree_propagate_p(btree, path, level, bh);

 out:
	nilfs_btree_clear_path(btree, path);
	nilfs_btree_free_path(btree, path);

	return ret;
}
/*
 * nilfs_btree_propagate_gc - bop_propagate for GC bmaps: no btree path
 * update is performed, the buffer's block number is simply marked dirty
 * through the bmap layer.
 */
static int nilfs_btree_propagate_gc(const struct nilfs_bmap *bmap,
				    struct buffer_head *bh)
{
	return nilfs_bmap_mark_dirty(bmap, bh->b_blocknr);
}
/*
 * nilfs_btree_add_dirty_buffer - insert node buffer @bh into the per-level
 * list of dirty buffers, keeping each list sorted by first key
 * @lists: array of list heads indexed by btree level
 *
 * Takes an extra reference on @bh, which remains held while the buffer
 * sits on the list.
 */
static void nilfs_btree_add_dirty_buffer(struct nilfs_btree *btree,
					 struct list_head *lists,
					 struct buffer_head *bh)
{
	struct list_head *head;
	struct buffer_head *cbh;
	struct nilfs_btree_node *node, *cnode;
	__u64 key, ckey;
	int level;

	get_bh(bh);
	node = (struct nilfs_btree_node *)bh->b_data;
	key = nilfs_btree_node_get_key(btree, node, 0);
	level = nilfs_btree_node_get_level(btree, node);
	/* find the first list entry whose key is greater than ours;
	 * inserting before it (list_add_tail on that position) keeps the
	 * level list sorted by ascending key */
	list_for_each(head, &lists[level]) {
		cbh = list_entry(head, struct buffer_head, b_assoc_buffers);
		cnode = (struct nilfs_btree_node *)cbh->b_data;
		ckey = nilfs_btree_node_get_key(btree, cnode, 0);
		if (key < ckey)
			break;
	}
	list_add_tail(&bh->b_assoc_buffers, head);
}
/*
 * nilfs_btree_lookup_dirty_buffers - bop_lookup_dirty_buffers operation:
 * collect all dirty btree node buffers of @bmap onto @listp
 *
 * Scans the btnode cache for dirty pages, sorts the dirty buffers into
 * per-level lists (each sorted by key), and finally splices the lists
 * onto the tail of @listp in ascending level order.
 */
static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *bmap,
					     struct list_head *listp)
{
	struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
	struct address_space *btcache = &NILFS_BMAP_I(bmap)->i_btnode_cache;
	struct list_head lists[NILFS_BTREE_LEVEL_MAX];
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	pgoff_t index = 0;
	int level, i;

	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < NILFS_BTREE_LEVEL_MAX;
	     level++)
		INIT_LIST_HEAD(&lists[level]);

	pagevec_init(&pvec, 0);

	/* walk dirty pages of the btnode cache a pagevec at a time */
	while (pagevec_lookup_tag(&pvec, btcache, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh))
					nilfs_btree_add_dirty_buffer(btree,
								     lists, bh);
			} while ((bh = bh->b_this_page) != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* append to listp's tail, lowest level first */
	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < NILFS_BTREE_LEVEL_MAX;
	     level++)
		list_splice(&lists[level], listp->prev);
}
/*
 * nilfs_btree_assign_p - assign disk block number @blocknr to the buffer
 * at @level of @path for a bmap using physical block numbers
 * @bh: in/out; may be replaced when the btnode cache re-keys the buffer
 * @binfo: filled with the on-disk block information (bi_dat)
 *
 * Updates the parent's pointer to @blocknr; for node buffers the btnode
 * cache entry is re-keyed from the old pointer to @blocknr first.
 */
static int nilfs_btree_assign_p(struct nilfs_btree *btree,
				struct nilfs_btree_path *path,
				int level,
				struct buffer_head **bh,
				sector_t blocknr,
				union nilfs_binfo *binfo)
{
	struct nilfs_btree_node *parent;
	__u64 key;
	__u64 ptr;
	int ret;

	parent = nilfs_btree_get_node(btree, path, level + 1);
	ptr = nilfs_btree_node_get_ptr(btree, parent,
				       path[level + 1].bp_index);
	if (buffer_nilfs_node(*bh)) {
		/* re-key the cached node buffer from ptr to blocknr */
		path[level].bp_ctxt.oldkey = ptr;
		path[level].bp_ctxt.newkey = blocknr;
		path[level].bp_ctxt.bh = *bh;
		ret = nilfs_btnode_prepare_change_key(
			&NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
			&path[level].bp_ctxt);
		if (ret < 0)
			return ret;
		nilfs_btnode_commit_change_key(
			&NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
			&path[level].bp_ctxt);
		/* the caller must use the possibly-replaced buffer */
		*bh = path[level].bp_ctxt.bh;
	}

	nilfs_btree_node_set_ptr(btree, parent,
				 path[level + 1].bp_index, blocknr);

	key = nilfs_btree_node_get_key(btree, parent,
				       path[level + 1].bp_index);
	/* on-disk format */
	binfo->bi_dat.bi_blkoff = nilfs_bmap_key_to_dkey(key);
	binfo->bi_dat.bi_level = level;

	return 0;
}
/*
 * nilfs_btree_assign_v - assign disk block number @blocknr to the buffer
 * at @level of @path for a bmap using virtual block numbers
 * @binfo: filled with the on-disk block information (bi_v)
 *
 * Reads the virtual pointer from the parent node and binds it to
 * @blocknr through the bmap layer; the btree itself is not modified.
 */
static int nilfs_btree_assign_v(struct nilfs_btree *btree,
				struct nilfs_btree_path *path,
				int level,
				struct buffer_head **bh,
				sector_t blocknr,
				union nilfs_binfo *binfo)
{
	struct nilfs_btree_node *parent;
	__u64 key;
	__u64 ptr;
	union nilfs_bmap_ptr_req req;
	int ret;

	parent = nilfs_btree_get_node(btree, path, level + 1);
	ptr = nilfs_btree_node_get_ptr(btree, parent,
				       path[level + 1].bp_index);
	req.bpr_ptr = ptr;
	ret = nilfs_bmap_start_v(&btree->bt_bmap, &req, blocknr);
	if (unlikely(ret < 0))
		return ret;

	key = nilfs_btree_node_get_key(btree, parent,
				       path[level + 1].bp_index);
	/* on-disk format */
	binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr);
	binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key);

	return 0;
}
  1842. static int nilfs_btree_assign(struct nilfs_bmap *bmap,
  1843. struct buffer_head **bh,
  1844. sector_t blocknr,
  1845. union nilfs_binfo *binfo)
  1846. {
  1847. struct nilfs_btree *btree;
  1848. struct nilfs_btree_path *path;
  1849. struct nilfs_btree_node *node;
  1850. __u64 key;
  1851. int level, ret;
  1852. btree = (struct nilfs_btree *)bmap;
  1853. path = nilfs_btree_alloc_path(btree);
  1854. if (path == NULL)
  1855. return -ENOMEM;
  1856. nilfs_btree_init_path(btree, path);
  1857. if (buffer_nilfs_node(*bh)) {
  1858. node = (struct nilfs_btree_node *)(*bh)->b_data;
  1859. key = nilfs_btree_node_get_key(btree, node, 0);
  1860. level = nilfs_btree_node_get_level(btree, node);
  1861. } else {
  1862. key = nilfs_bmap_data_get_key(bmap, *bh);
  1863. level = NILFS_BTREE_LEVEL_DATA;
  1864. }
  1865. ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1);
  1866. if (ret < 0) {
  1867. WARN_ON(ret == -ENOENT);
  1868. goto out;
  1869. }
  1870. ret = NILFS_BMAP_USE_VBN(bmap) ?
  1871. nilfs_btree_assign_v(btree, path, level, bh, blocknr, binfo) :
  1872. nilfs_btree_assign_p(btree, path, level, bh, blocknr, binfo);
  1873. out:
  1874. nilfs_btree_clear_path(btree, path);
  1875. nilfs_btree_free_path(btree, path);
  1876. return ret;
  1877. }
  1878. static int nilfs_btree_assign_gc(struct nilfs_bmap *bmap,
  1879. struct buffer_head **bh,
  1880. sector_t blocknr,
  1881. union nilfs_binfo *binfo)
  1882. {
  1883. struct nilfs_btree *btree;
  1884. struct nilfs_btree_node *node;
  1885. __u64 key;
  1886. int ret;
  1887. btree = (struct nilfs_btree *)bmap;
  1888. ret = nilfs_bmap_move_v(bmap, (*bh)->b_blocknr, blocknr);
  1889. if (ret < 0)
  1890. return ret;
  1891. if (buffer_nilfs_node(*bh)) {
  1892. node = (struct nilfs_btree_node *)(*bh)->b_data;
  1893. key = nilfs_btree_node_get_key(btree, node, 0);
  1894. } else
  1895. key = nilfs_bmap_data_get_key(bmap, *bh);
  1896. /* on-disk format */
  1897. binfo->bi_v.bi_vblocknr = cpu_to_le64((*bh)->b_blocknr);
  1898. binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key);
  1899. return 0;
  1900. }
/*
 * nilfs_btree_mark - bop_mark operation: mark the btree block that holds
 * @key at @level dirty
 *
 * Looks up the block's pointer, reads it in, marks its buffer dirty, and
 * flags the whole bmap dirty.  Returns 0 on success or a negative error
 * code; -ENOENT from lookup or read triggers a WARN_ON because the block
 * is expected to exist.
 */
static int nilfs_btree_mark(struct nilfs_bmap *bmap, __u64 key, int level)
{
	struct buffer_head *bh;
	struct nilfs_btree *btree;
	struct nilfs_btree_path *path;
	__u64 ptr;
	int ret;

	btree = (struct nilfs_btree *)bmap;
	path = nilfs_btree_alloc_path(btree);
	if (path == NULL)
		return -ENOMEM;
	nilfs_btree_init_path(btree, path);

	/* lookup at level + 1 yields the pointer to the block at @level */
	ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level + 1);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		goto out;
	}
	ret = nilfs_btree_get_block(btree, ptr, &bh);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		goto out;
	}

	if (!buffer_dirty(bh))
		nilfs_btnode_mark_dirty(bh);
	brelse(bh);	/* drop the reference taken by nilfs_btree_get_block() */
	if (!nilfs_bmap_dirty(&btree->bt_bmap))
		nilfs_bmap_set_dirty(&btree->bt_bmap);

 out:
	nilfs_btree_clear_path(btree, path);
	nilfs_btree_free_path(btree, path);
	return ret;
}
/* Operation table for regular btree-mapped files. */
static const struct nilfs_bmap_operations nilfs_btree_ops = {
	.bop_lookup		=	nilfs_btree_lookup,
	.bop_lookup_contig	=	nilfs_btree_lookup_contig,
	.bop_insert		=	nilfs_btree_insert,
	.bop_delete		=	nilfs_btree_delete,
	.bop_clear		=	NULL,

	.bop_propagate		=	nilfs_btree_propagate,

	.bop_lookup_dirty_buffers =	nilfs_btree_lookup_dirty_buffers,

	.bop_assign		=	nilfs_btree_assign,
	.bop_mark		=	nilfs_btree_mark,

	.bop_last_key		=	nilfs_btree_last_key,
	.bop_check_insert	=	NULL,
	.bop_check_delete	=	nilfs_btree_check_delete,
	.bop_gather_data	=	nilfs_btree_gather_data,
};
/*
 * Operation table for GC (garbage collection) use: only propagate, dirty
 * buffer lookup, and assign are supported; all other operations are NULL.
 */
static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
	.bop_lookup		=	NULL,
	.bop_lookup_contig	=	NULL,
	.bop_insert		=	NULL,
	.bop_delete		=	NULL,
	.bop_clear		=	NULL,

	.bop_propagate		=	nilfs_btree_propagate_gc,

	.bop_lookup_dirty_buffers =	nilfs_btree_lookup_dirty_buffers,

	.bop_assign		=	nilfs_btree_assign_gc,
	.bop_mark		=	NULL,

	.bop_last_key		=	NULL,
	.bop_check_insert	=	NULL,
	.bop_check_delete	=	NULL,
	.bop_gather_data	=	NULL,
};
/*
 * nilfs_btree_init - install the regular btree operation table on @bmap
 *
 * Always returns 0.
 */
int nilfs_btree_init(struct nilfs_bmap *bmap)
{
	bmap->b_ops = &nilfs_btree_ops;
	return 0;
}
/*
 * nilfs_btree_init_gc - install the restricted GC operation table on @bmap
 */
void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
{
	bmap->b_ops = &nilfs_btree_ops_gc;
}