ctree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot);

inline void btrfs_init_path(struct btrfs_path *p)
{
	memset(p, 0, sizeof(*p));
}
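/*
 * allocate a zeroed path from the slab cache; readahead is turned on
 * by default (path->reada = 1)
 */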
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_alloc(btrfs_path_cachep, GFP_NOFS);
	if (path) {
		btrfs_init_path(path);
		path->reada = 1;
	}
	return path;
}

void btrfs_free_path(struct btrfs_path *p)
{
	btrfs_release_path(NULL, p);
	kmem_cache_free(btrfs_path_cachep, p);
}
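/*
 * drop the reference held on every extent buffer in the path and zero
 * the path struct so it can be reused
 */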
void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i])
			break;
		free_extent_buffer(p->nodes[i]);
	}
	memset(p, 0, sizeof(*p));
}
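/*
 * copy-on-write worker: allocate a new block near search_start, copy
 * buf into it, point the parent slot (or the root) at the copy and
 * drop the old block.  The caller gets the new buffer in *cow_ret.
 */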
static int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct extent_buffer *cow;
	int ret = 0;
	int different_trans = 0;

	WARN_ON(root->ref_cows && trans->transid != root->last_trans);
	cow = btrfs_alloc_free_block(trans, root, buf->len,
				     search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_owner(cow, root->root_key.objectid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (btrfs_header_generation(buf) != trans->transid) {
		different_trans = 1;
		ret = btrfs_inc_ref(trans, root, buf);
		if (ret)
			return ret;
	} else {
		clean_tree_block(trans, root, buf);
	}

	if (buf == root->node) {
		root->node = cow;
		extent_buffer_get(cow);
		if (buf != root->commit_root) {
			btrfs_free_extent(trans, root, buf->start,
					  buf->len, 1);
		}
		free_extent_buffer(buf);
	} else {
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_mark_buffer_dirty(parent);
		WARN_ON(btrfs_header_generation(parent) != trans->transid);
		btrfs_free_extent(trans, root, buf->start, buf->len, 1);
	}
	free_extent_buffer(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
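/*
 * cow the block at the given path position.  Blocks whose header
 * generation already matches the running transaction are returned
 * unchanged; everything else is handed to __btrfs_cow_block with a
 * search_start rounded down to the block group.
 */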
int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction) {
		printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
		printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
		       root->fs_info->generation);
		WARN_ON(1);
	}
	if (btrfs_header_generation(buf) == trans->transid) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)BTRFS_BLOCK_GROUP_SIZE - 1);
	ret = __btrfs_cow_block(trans, root, buf, parent,
				parent_slot, cow_ret, search_start, 0);
	return ret;
}
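/*
 * two blocks count as "close" when they sit within 32k of each other
 * on disk; used below to decide whether a child needs to be re-cowed
 * during defrag
 */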
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	if (k1.objectid > k2->objectid)
		return 1;
	if (k1.objectid < k2->objectid)
		return -1;
	if (k1.type > k2->type)
		return 1;
	if (k1.type < k2->type)
		return -1;
	if (k1.offset > k2->offset)
		return 1;
	if (k1.offset < k2->offset)
		return -1;
	return 0;
}
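/*
 * defrag helper: re-cow the children of 'parent' starting at
 * start_slot so that blocks far apart on disk get reallocated close
 * together.  With cache_only set, only cached and uptodate blocks at
 * parent_level == 1 are touched.
 */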
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	struct extent_buffer *tmp;
	u64 blocknr;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

	if (trans->transaction != root->fs_info->running_transaction) {
		printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
		printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
		       root->fs_info->generation);
		WARN_ON(1);
	}

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		if (!parent->map_token) {
			map_extent_buffer(parent,
					btrfs_node_key_ptr_offset(i),
					sizeof(struct btrfs_key_ptr),
					&parent->map_token, &parent->kaddr,
					&parent->map_start, &parent->map_len,
					KM_USER1);
		}
		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}
		if (parent->map_token) {
			unmap_extent_buffer(parent, parent->map_token,
					    KM_USER1);
			parent->map_token = NULL;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (cache_only) {
				free_extent_buffer(cur);
				continue;
			}
			if (!cur) {
				cur = read_tree_block(root, blocknr,
						      blocksize);
			} else if (!uptodate) {
				btrfs_read_buffer(cur);
			}
		}
		if (search_start == 0)
			search_start = last_block;

		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&tmp, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			free_extent_buffer(cur);
			break;
		}
		search_start = tmp->start;
		last_block = tmp->start;
		*last_ret = search_start;
		if (parent_level == 1)
			btrfs_clear_buffer_defrag(tmp);
		free_extent_buffer(tmp);
	}
	if (parent->map_token) {
		unmap_extent_buffer(parent, parent->map_token,
				    KM_USER1);
		parent->map_token = NULL;
	}
	return err;
}

/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
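/*
 * sanity checks on a node: the first key must match the key stored in
 * the parent slot, the parent blockptr must point at this node and the
 * keys around path->slots[level] must be in ascending order
 */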
static int check_node(struct btrfs_root *root, struct btrfs_path *path,
		      int level)
{
	struct extent_buffer *parent = NULL;
	struct extent_buffer *node = path->nodes[level];
	struct btrfs_disk_key parent_key;
	struct btrfs_disk_key node_key;
	int parent_slot;
	int slot;
	struct btrfs_key cpukey;
	u32 nritems = btrfs_header_nritems(node);

	if (path->nodes[level + 1])
		parent = path->nodes[level + 1];

	slot = path->slots[level];
	BUG_ON(nritems == 0);
	if (parent) {
		parent_slot = path->slots[level + 1];
		btrfs_node_key(parent, &parent_key, parent_slot);
		btrfs_node_key(node, &node_key, 0);
		BUG_ON(memcmp(&parent_key, &node_key,
			      sizeof(struct btrfs_disk_key)));
		BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
		       btrfs_header_bytenr(node));
	}
	BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
	if (slot != 0) {
		btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
		btrfs_node_key(node, &node_key, slot);
		BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
	}
	if (slot < nritems - 1) {
		btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
		btrfs_node_key(node, &node_key, slot);
		BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
	}
	return 0;
}
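/*
 * the same sanity checks for a leaf: parent key/blockptr agreement,
 * key ordering around the current slot, and item offsets that line up
 * back to back in the leaf data area
 */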
static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
		      int level)
{
	struct extent_buffer *leaf = path->nodes[level];
	struct extent_buffer *parent = NULL;
	int parent_slot;
	struct btrfs_key cpukey;
	struct btrfs_disk_key parent_key;
	struct btrfs_disk_key leaf_key;
	int slot = path->slots[0];
	u32 nritems = btrfs_header_nritems(leaf);

	if (path->nodes[level + 1])
		parent = path->nodes[level + 1];

	if (nritems == 0)
		return 0;

	if (parent) {
		parent_slot = path->slots[level + 1];
		btrfs_node_key(parent, &parent_key, parent_slot);
		btrfs_item_key(leaf, &leaf_key, 0);

		BUG_ON(memcmp(&parent_key, &leaf_key,
		       sizeof(struct btrfs_disk_key)));
		BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
		       btrfs_header_bytenr(leaf));
	}
#if 0
	for (i = 0; nritems > 1 && i < nritems - 2; i++) {
		btrfs_item_key_to_cpu(leaf, &cpukey, i + 1);
		btrfs_item_key(leaf, &leaf_key, i);
		if (comp_keys(&leaf_key, &cpukey) >= 0) {
			btrfs_print_leaf(root, leaf);
			printk("slot %d offset bad key\n", i);
			BUG_ON(1);
		}
		if (btrfs_item_offset_nr(leaf, i) !=
			btrfs_item_end_nr(leaf, i + 1)) {
			btrfs_print_leaf(root, leaf);
			printk("slot %d offset bad\n", i);
			BUG_ON(1);
		}
		if (i == 0) {
			if (btrfs_item_offset_nr(leaf, i) +
			    btrfs_item_size_nr(leaf, i) !=
			    BTRFS_LEAF_DATA_SIZE(root)) {
				btrfs_print_leaf(root, leaf);
				printk("slot %d first offset bad\n", i);
				BUG_ON(1);
			}
		}
	}
	if (nritems > 0) {
		if (btrfs_item_size_nr(leaf, nritems - 1) > 4096) {
			btrfs_print_leaf(root, leaf);
			printk("slot %d bad size \n", nritems - 1);
			BUG_ON(1);
		}
	}
#endif
	if (slot != 0 && slot < nritems - 1) {
		btrfs_item_key(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
		if (comp_keys(&leaf_key, &cpukey) <= 0) {
			btrfs_print_leaf(root, leaf);
			printk("slot %d offset bad key\n", slot);
			BUG_ON(1);
		}
		if (btrfs_item_offset_nr(leaf, slot - 1) !=
		       btrfs_item_end_nr(leaf, slot)) {
			btrfs_print_leaf(root, leaf);
			printk("slot %d offset bad\n", slot);
			BUG_ON(1);
		}
	}
	if (slot < nritems - 1) {
		btrfs_item_key(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
		BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			btrfs_print_leaf(root, leaf);
			printk("slot %d offset bad\n", slot);
			BUG_ON(1);
		}
	}
	BUG_ON(btrfs_item_offset_nr(leaf, 0) +
	       btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
	return 0;
}

static int check_block(struct btrfs_root *root, struct btrfs_path *path,
		       int level)
{
	return 0;
#if 0
	struct extent_buffer *buf = path->nodes[level];

	if (memcmp_extent_buffer(buf, root->fs_info->fsid,
				 (unsigned long)btrfs_header_fsid(buf),
				 BTRFS_FSID_SIZE)) {
		printk("warning bad block %Lu\n", buf->start);
		return 1;
	}
#endif
	if (level == 0)
		return check_leaf(root, path, level);
	return check_node(root, path, level);
}

/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static int generic_bin_search(struct extent_buffer *eb, unsigned long p,
			      int item_size, struct btrfs_key *key,
			      int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *map_token = NULL;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!map_token || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {
			if (map_token) {
				unmap_extent_buffer(eb, map_token, KM_USER0);
				map_token = NULL;
			}
			err = map_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&map_token, &kaddr,
						&map_start, &map_len, KM_USER0);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}
		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			if (map_token)
				unmap_extent_buffer(eb, map_token, KM_USER0);
			return 0;
		}
	}
	*slot = low;
	if (map_token)
		unmap_extent_buffer(eb, map_token, KM_USER0);
	return 1;
}

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0) {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	} else {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
	}
	return -1;
}
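/*
 * read the child tree block pointed to by the given slot in 'parent',
 * returning NULL for out-of-range slots
 */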
static struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;
	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
		       btrfs_level_size(root, btrfs_header_level(parent) - 1));
}
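/*
 * node balancing used while walking down for deletes: push pointers
 * between 'mid' and its left and right siblings, free nodes that end
 * up empty, and collapse the root when it is left with a single
 * pointer
 */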
static int balance_level(struct btrfs_trans_handle *trans, struct btrfs_root
			 *root, struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	int err_on_enospc = 0;
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];
	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1)
		parent = path->nodes[level + 1];
	pslot = path->slots[level + 1];

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		BUG_ON(!child);
		root->node = child;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		wait_on_tree_block_writeback(root, mid);
		/* once for the path */
		free_extent_buffer(mid);
		ret = btrfs_free_extent(trans, root, mid->start, mid->len, 1);
		/* once for the root ptr */
		free_extent_buffer(mid);
		return ret;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	if (btrfs_header_nritems(mid) < 2)
		err_on_enospc = 1;

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid);
		if (wret < 0)
			ret = wret;
		if (btrfs_header_nritems(mid) < 2)
			err_on_enospc = 1;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			u64 bytenr = right->start;
			u32 blocksize = right->len;

			clean_tree_block(trans, root, right);
			wait_on_tree_block_writeback(root, right);
			free_extent_buffer(right);
			right = NULL;
			wret = del_ptr(trans, root, path, level + 1, pslot +
				       1);
			if (wret)
				ret = wret;
			wret = btrfs_free_extent(trans, root, bytenr,
						 blocksize, 1);
			if (wret)
				ret = wret;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		BUG_ON(!left);
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		/* we've managed to empty the middle node, drop it */
		u64 bytenr = mid->start;
		u32 blocksize = mid->len;
		clean_tree_block(trans, root, mid);
		wait_on_tree_block_writeback(root, mid);
		free_extent_buffer(mid);
		mid = NULL;
		wret = del_ptr(trans, root, path, level + 1, pslot);
		if (wret)
			ret = wret;
		wret = btrfs_free_extent(trans, root, bytenr, blocksize, 1);
		if (wret)
			ret = wret;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid)
				free_extent_buffer(mid);
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	check_block(root, path, level);
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right)
		free_extent_buffer(right);
	if (left)
		free_extent_buffer(left);
	return ret;
}
/* returns zero if the push worked, non-zero otherwise */
static int push_nodes_for_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1)
		parent = path->nodes[level + 1];
	pslot = path->slots[level + 1];

	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;
		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				free_extent_buffer(left);
			}
			return 0;
		}
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;
		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				free_extent_buffer(mid);
			} else {
				free_extent_buffer(right);
			}
			return 0;
		}
		free_extent_buffer(right);
	}
	return 1;
}

/*
 * readahead one full node of leaves
 */
static void reada_for_search(struct btrfs_root *root, struct btrfs_path *path,
			     int level, int slot)
{
	struct extent_buffer *node;
	u32 nritems;
	u64 search;
	u64 lowest_read;
	u64 highest_read;
	u64 nread = 0;
	int direction = path->reada;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];
	search = btrfs_node_blockptr(node, slot);
	blocksize = btrfs_level_size(root, level - 1);
	eb = btrfs_find_tree_block(root, search, blocksize);
	if (eb) {
		free_extent_buffer(eb);
		return;
	}

	highest_read = search;
	lowest_read = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;
	while (1) {
		if (direction < 0) {
			if (nr == 0)
				break;
			nr--;
		} else if (direction > 0) {
			nr++;
			if (nr >= nritems)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if ((search >= lowest_read && search <= highest_read) ||
		    (search < lowest_read && lowest_read - search <= 32768) ||
		    (search > highest_read && search - highest_read <= 32768)) {
			readahead_tree_block(root, search, blocksize);
			nread += blocksize;
		}
		nscan++;
		if (path->reada < 2 && (nread > (256 * 1024) || nscan > 32))
			break;
		if (nread > (1024 * 1024) || nscan > 128)
			break;

		if (search < lowest_read)
			lowest_read = search;
		if (search > highest_read)
			highest_read = search;
	}
}
/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
{
	struct extent_buffer *b;
	u64 bytenr;
	int slot;
	int ret;
	int level;
	int should_reada = p->reada;
	u8 lowest_level = 0;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len);
	WARN_ON(p->nodes[0] != NULL);
	WARN_ON(!mutex_is_locked(&root->fs_info->fs_mutex));
again:
	b = root->node;
	extent_buffer_get(b);
	while (b) {
		level = btrfs_header_level(b);
		if (cow) {
			int wret;
			wret = btrfs_cow_block(trans, root, b,
					       p->nodes[level + 1],
					       p->slots[level + 1],
					       &b);
			if (wret) {
				free_extent_buffer(b);
				return wret;
			}
		}
		BUG_ON(!cow && ins_len);
		if (level != btrfs_header_level(b))
			WARN_ON(1);
		level = btrfs_header_level(b);
		p->nodes[level] = b;
		ret = check_block(root, p, level);
		if (ret)
			return -1;
		ret = bin_search(b, key, level, &slot);
		if (level != 0) {
			if (ret && slot > 0)
				slot -= 1;
			p->slots[level] = slot;
			if (ins_len > 0 && btrfs_header_nritems(b) >=
			    BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
				int sret = split_node(trans, root, p, level);
				BUG_ON(sret > 0);
				if (sret)
					return sret;
				b = p->nodes[level];
				slot = p->slots[level];
			} else if (ins_len < 0) {
				int sret = balance_level(trans, root, p,
							 level);
				if (sret)
					return sret;
				b = p->nodes[level];
				if (!b) {
					btrfs_release_path(NULL, p);
					goto again;
				}
				slot = p->slots[level];
				BUG_ON(btrfs_header_nritems(b) == 1);
			}
			/* this is only true while dropping a snapshot */
			if (level == lowest_level)
				break;
			bytenr = btrfs_node_blockptr(b, slot);
			if (should_reada)
				reada_for_search(root, p, level, slot);
			b = read_tree_block(root, bytenr,
					    btrfs_level_size(root, level - 1));
		} else {
			p->slots[level] = slot;
			if (ins_len > 0 && btrfs_leaf_free_space(root, b) <
			    sizeof(struct btrfs_item) + ins_len) {
				int sret = split_leaf(trans, root, key,
						      p, ins_len, ret == 0);
				BUG_ON(sret > 0);
				if (sret)
					return sret;
			}
			return ret;
		}
	}
	return 1;
}

/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 * If this fails to write a tree block, it returns -1, but continues
 * fixing up the blocks in ram so the tree is consistent.
 */
static int fixup_low_keys(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct btrfs_path *path,
			  struct btrfs_disk_key *key, int level)
{
	int i;
	int ret = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
	return ret;
}
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct extent_buffer *dst,
			  struct extent_buffer *src)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;

	if (push_items <= 0) {
		return 1;
	}

	if (src_nritems < push_items)
		push_items = src_nritems;

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);
	return ret;
}

/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
			      btrfs_node_key_ptr_offset(0),
			      (dst_nritems) *
			      sizeof(struct btrfs_key_ptr));

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);
	return ret;
}

/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static int insert_new_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_path *path, int level)
{
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct btrfs_disk_key lower_key;

	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	c = btrfs_alloc_free_block(trans, root, root->nodesize,
				   root->node->start, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);
	memset_extent_buffer(c, 0, 0, root->nodesize);
	btrfs_set_header_nritems(c, 1);
	btrfs_set_header_level(c, level);
	btrfs_set_header_bytenr(c, c->start);
	btrfs_set_header_generation(c, trans->transid);
	btrfs_set_header_owner(c, root->root_key.objectid);
	lower = path->nodes[level-1];

	write_extent_buffer(c, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(c),
			    BTRFS_FSID_SIZE);
	if (level == 1)
		btrfs_item_key(lower, &lower_key, 0);
	else
		btrfs_node_key(lower, &lower_key, 0);
	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);

	btrfs_mark_buffer_dirty(c);

	/* the super has an extra ref to root->node */
	free_extent_buffer(root->node);
	root->node = c;
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->slots[level] = 0;
	return 0;
}
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 *
 * returns zero on success and < 0 on any error
 */
static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, struct btrfs_disk_key
		      *key, u64 bytenr, int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;

	BUG_ON(!path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	if (slot > nritems)
		BUG();
	if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
		BUG();
	if (slot != nritems) {
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
	return 0;
}

/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level)
{
	struct extent_buffer *c;
	struct extent_buffer *split;
	struct btrfs_disk_key disk_key;
	int mid;
	int ret;
	int wret;
	u32 c_nritems;

	c = path->nodes[level];
	if (c == root->node) {
		/* trying to split the root, lets make a new one */
		ret = insert_new_root(trans, root, path, level + 1);
		if (ret)
			return ret;
	} else {
		ret = push_nodes_for_insert(trans, root, path, level);
		c = path->nodes[level];
		if (!ret && btrfs_header_nritems(c) <
		    BTRFS_NODEPTRS_PER_BLOCK(root) - 1)
			return 0;
		if (ret < 0)
			return ret;
	}

	c_nritems = btrfs_header_nritems(c);
	split = btrfs_alloc_free_block(trans, root, root->nodesize,
				       c->start, 0);
	if (IS_ERR(split))
		return PTR_ERR(split);

	btrfs_set_header_flags(split, btrfs_header_flags(c));
	btrfs_set_header_level(split, btrfs_header_level(c));
	btrfs_set_header_bytenr(split, split->start);
	btrfs_set_header_generation(split, trans->transid);
	btrfs_set_header_owner(split, root->root_key.objectid);
	write_extent_buffer(split, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(split),
			    BTRFS_FSID_SIZE);

	mid = (c_nritems + 1) / 2;

	copy_extent_buffer(split, c,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(mid),
			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
	btrfs_set_header_nritems(split, c_nritems - mid);
	btrfs_set_header_nritems(c, mid);
	ret = 0;

	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(split);

	btrfs_node_key(split, &disk_key, 0);
	wret = insert_ptr(trans, root, path, &disk_key, split->start,
			  path->slots[level + 1] + 1,
			  level + 1);
	if (wret)
		ret = wret;

	if (path->slots[level] >= mid) {
		path->slots[level] -= mid;
		free_extent_buffer(c);
		path->nodes[level] = split;
		path->slots[level + 1] += 1;
	} else {
		free_extent_buffer(split);
	}
	return ret;
}
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	data_len = btrfs_item_end_nr(l, start);
	data_len = data_len - btrfs_item_offset_nr(l, end);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}

/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;
	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		printk("leaf free space ret %d, leaf data size %lu, used %d nritems %d\n",
		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
		       leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}

/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path, int data_size)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;
	struct btrfs_disk_key disk_key;
	int slot;
	int i;
	int free_space;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 left_nritems;
	u32 right_nritems;
	u32 data_end;
	u32 this_item_size;
	int ret;

	slot = path->slots[1];
	if (!path->nodes[1]) {
		return 1;
	}
	upper = path->nodes[1];
	if (slot >= btrfs_header_nritems(upper) - 1)
		return 1;

	right = read_tree_block(root, btrfs_node_blockptr(upper, slot + 1),
				root->leafsize);
	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size + sizeof(struct btrfs_item)) {
		free_extent_buffer(right);
		return 1;
	}

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,
			      slot + 1, &right);
	if (ret) {
		free_extent_buffer(right);
		return 1;
	}
	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size + sizeof(struct btrfs_item)) {
		free_extent_buffer(right);
		return 1;
	}

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0) {
		free_extent_buffer(right);
		return 1;
	}

	for (i = left_nritems - 1; i >= 1; i--) {
		item = btrfs_item_nr(left, i);

		if (path->slots[0] == i)
			push_space += data_size + sizeof(*item);

		if (!left->map_token) {
			map_extent_buffer(left, (unsigned long)item,
					sizeof(struct btrfs_item),
					&left->map_token, &left->kaddr,
					&left->map_start, &left->map_len,
					KM_USER1);
		}

		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;
		push_items++;
		push_space += this_item_size + sizeof(*item);
	}
	if (left->map_token) {
		unmap_extent_buffer(left, left->map_token, KM_USER1);
		left->map_token = NULL;
	}

	if (push_items == 0) {
		free_extent_buffer(right);
		return 1;
	}

	if (push_items == left_nritems)
		WARN_ON(1);

	/* push left to right */
	right_nritems = btrfs_header_nritems(right);
	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(root, left);

	/* make room in the right data area */
	data_end = leaf_data_end(root, right);
	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
			      BTRFS_LEAF_DATA_SIZE(root) - data_end);

	/* copy from the left data area */
	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
		     btrfs_leaf_data(left) + leaf_data_end(root, left),
		     push_space);

	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));

	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
		   btrfs_item_nr_offset(left_nritems - push_items),
		   push_items * sizeof(struct btrfs_item));

	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);
		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}
		push_space -= btrfs_item_size(right, item);
		btrfs_set_item_offset(right, item, push_space);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}
	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);

	btrfs_mark_buffer_dirty(left);
	btrfs_mark_buffer_dirty(right);

	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		path->slots[0] -= left_nritems;
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
	} else {
		free_extent_buffer(right);
	}
	return 0;
}
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int data_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *right = path->nodes[0];
	struct extent_buffer *left;
	int slot;
	int i;
	int free_space;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 old_left_nritems;
	u32 right_nritems;
	int ret = 0;
	int wret;
	u32 this_item_size;
	u32 old_left_item_size;

	slot = path->slots[1];
	if (slot == 0)
		return 1;
	if (!path->nodes[1])
		return 1;

	right_nritems = btrfs_header_nritems(right);
	if (right_nritems == 0) {
		return 1;
	}
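	/* read in the left neighbor leaf and bail out early if it is already too full */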
	left = read_tree_block(root, btrfs_node_blockptr(path->nodes[1],
			       slot - 1), root->leafsize);
	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size + sizeof(struct btrfs_item)) {
		free_extent_buffer(left);
		return 1;
	}

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, left,
			      path->nodes[1], slot - 1, &left);
	if (ret) {
		/* we hit -ENOSPC, but it isn't fatal here */
		free_extent_buffer(left);
		return 1;
	}

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size + sizeof(struct btrfs_item)) {
		free_extent_buffer(left);
		return 1;
	}
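	/*
	 * walk forward from the start of the right leaf and count how many
	 * items will fit into the left leaf's free space
	 */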
	for (i = 0; i < right_nritems - 1; i++) {
		item = btrfs_item_nr(right, i);
		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}

		if (path->slots[0] == i)
			push_space += data_size + sizeof(*item);

		this_item_size = btrfs_item_size(right, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}

	if (push_items == 0) {
		free_extent_buffer(left);
		return 1;
	}

	if (push_items == btrfs_header_nritems(right))
		WARN_ON(1);

	/* push data from right to left */
	copy_extent_buffer(left, right,
			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
			   btrfs_item_nr_offset(0),
			   push_items * sizeof(struct btrfs_item));

	push_space = BTRFS_LEAF_DATA_SIZE(root) -
		     btrfs_item_offset_nr(right, push_items - 1);

	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
		     leaf_data_end(root, left) - push_space,
		     btrfs_leaf_data(right) +
		     btrfs_item_offset_nr(right, push_items - 1),
		     push_space);

	old_left_nritems = btrfs_header_nritems(left);
	BUG_ON(old_left_nritems < 0);

	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
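	/*
	 * the copied items still carry offsets relative to the old right
	 * leaf; rebase them onto the end of the left leaf's data area
	 */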
	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
		u32 ioff;

		item = btrfs_item_nr(left, i);
		if (!left->map_token) {
			map_extent_buffer(left, (unsigned long)item,
					sizeof(struct btrfs_item),
					&left->map_token, &left->kaddr,
					&left->map_start, &left->map_len,
					KM_USER1);
		}

		ioff = btrfs_item_offset(left, item);
		btrfs_set_item_offset(left, item,
		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
	}
	btrfs_set_header_nritems(left, old_left_nritems + push_items);
	if (left->map_token) {
		unmap_extent_buffer(left, left->map_token, KM_USER1);
		left->map_token = NULL;
	}

	/* fixup right node */
	push_space = btrfs_item_offset_nr(right, push_items - 1) -
		     leaf_data_end(root, right);
	memmove_extent_buffer(right, btrfs_leaf_data(right) +
			      BTRFS_LEAF_DATA_SIZE(root) - push_space,
			      btrfs_leaf_data(right) +
			      leaf_data_end(root, right), push_space);

	memmove_extent_buffer(right, btrfs_item_nr_offset(0),
			      btrfs_item_nr_offset(push_items),
			      (btrfs_header_nritems(right) - push_items) *
			      sizeof(struct btrfs_item));

	right_nritems = btrfs_header_nritems(right) - push_items;
	btrfs_set_header_nritems(right, right_nritems);
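	/* re-pack the remaining items' data offsets against the end of the leaf */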
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);

		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}

		push_space = push_space - btrfs_item_size(right, item);
		btrfs_set_item_offset(right, item, push_space);
	}
	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}

	btrfs_mark_buffer_dirty(left);
	btrfs_mark_buffer_dirty(right);

	btrfs_item_key(right, &disk_key, 0);
	wret = fixup_low_keys(trans, root, path, &disk_key, 1);
	if (wret)
		ret = wret;

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] < push_items) {
		path->slots[0] += old_left_nritems;
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = left;
		path->slots[1] -= 1;
	} else {
		free_extent_buffer(left);
		path->slots[0] -= push_items;
	}
	BUG_ON(path->slots[0] < 0);
	return ret;
}
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend)
{
	struct extent_buffer *l;
	u32 nritems;
	int mid;
	int slot;
	struct extent_buffer *right;
	int space_needed = data_size + sizeof(struct btrfs_item);
	int data_copy_size;
	int rt_data_off;
	int i;
	int ret = 0;
	int wret;
	int double_split;
	int num_doubles = 0;
	struct btrfs_disk_key disk_key;

	if (extend)
		space_needed = data_size;

	/* first try to make some room by pushing left and right */
	if (ins_key->type != BTRFS_DIR_ITEM_KEY) {
		wret = push_leaf_right(trans, root, path, data_size);
		if (wret < 0) {
			return wret;
		}
		if (wret) {
			wret = push_leaf_left(trans, root, path, data_size);
			if (wret < 0)
				return wret;
		}
		l = path->nodes[0];

		/* did the pushes work? */
		if (btrfs_leaf_free_space(root, l) >= space_needed)
			return 0;
	}

	if (!path->nodes[1]) {
		ret = insert_new_root(trans, root, path, 1);
		if (ret)
			return ret;
	}
again:
	double_split = 0;
	l = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(l);
	mid = (nritems + 1) / 2;

	right = btrfs_alloc_free_block(trans, root, root->leafsize,
				       l->start, 0);
	if (IS_ERR(right))
		return PTR_ERR(right);
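	/* initialize the header of the new (empty) right leaf */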
	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(right, right->start);
	btrfs_set_header_generation(right, trans->transid);
	btrfs_set_header_owner(right, root->root_key.objectid);
	btrfs_set_header_level(right, 0);
	write_extent_buffer(right, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(right),
			    BTRFS_FSID_SIZE);
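	/*
	 * pick the split point.  if splitting at the midpoint would not
	 * leave enough room, fall back to splitting at the insertion slot,
	 * and when the new item would land at the very end (or very start)
	 * just hand back the new leaf empty
	 */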
	if (mid <= slot) {
		if (nritems == 1 ||
		    leaf_space_used(l, mid, nritems - mid) + space_needed >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (slot >= nritems) {
				btrfs_cpu_key_to_disk(&disk_key, ins_key);
				btrfs_set_header_nritems(right, 0);
				wret = insert_ptr(trans, root, path,
						  &disk_key, right->start,
						  path->slots[1] + 1, 1);
				if (wret)
					ret = wret;
				free_extent_buffer(path->nodes[0]);
				path->nodes[0] = right;
				path->slots[0] = 0;
				path->slots[1] += 1;
				return ret;
			}
			mid = slot;
			if (mid != nritems &&
			    leaf_space_used(l, mid, nritems - mid) +
			    space_needed > BTRFS_LEAF_DATA_SIZE(root)) {
				double_split = 1;
			}
		}
	} else {
		if (leaf_space_used(l, 0, mid + 1) + space_needed >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (!extend && slot == 0) {
				btrfs_cpu_key_to_disk(&disk_key, ins_key);
				btrfs_set_header_nritems(right, 0);
				wret = insert_ptr(trans, root, path,
						  &disk_key,
						  right->start,
						  path->slots[1], 1);
				if (wret)
					ret = wret;
				free_extent_buffer(path->nodes[0]);
				path->nodes[0] = right;
				path->slots[0] = 0;
				if (path->slots[1] == 0) {
					wret = fixup_low_keys(trans, root,
						      path, &disk_key, 1);
					if (wret)
						ret = wret;
				}
				return ret;
			} else if (extend && slot == 0) {
				mid = 1;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    space_needed > BTRFS_LEAF_DATA_SIZE(root)) {
					double_split = 1;
				}
			}
		}
	}
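	/* copy items [mid, nritems) and their data into the new right leaf */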
	nritems = nritems - mid;
	btrfs_set_header_nritems(right, nritems);
	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);

	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(mid),
			   nritems * sizeof(struct btrfs_item));

	copy_extent_buffer(right, l,
		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
		     data_copy_size, btrfs_leaf_data(l) +
		     leaf_data_end(root, l), data_copy_size);

	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
		      btrfs_item_end_nr(l, mid);

	for (i = 0; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(right, i);
		u32 ioff;

		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}

		ioff = btrfs_item_offset(right, item);
		btrfs_set_item_offset(right, item, ioff + rt_data_off);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}

	btrfs_set_header_nritems(l, mid);
	ret = 0;
	btrfs_item_key(right, &disk_key, 0);
	wret = insert_ptr(trans, root, path, &disk_key, right->start,
			  path->slots[1] + 1, 1);
	if (wret)
		ret = wret;

	btrfs_mark_buffer_dirty(right);
	btrfs_mark_buffer_dirty(l);
	BUG_ON(path->slots[0] != slot);

	if (mid <= slot) {
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[0] -= mid;
		path->slots[1] += 1;
	} else
		free_extent_buffer(right);

	BUG_ON(path->slots[0] < 0);

	if (double_split) {
		BUG_ON(num_doubles != 0);
		num_doubles++;
		goto again;
	}
	return ret;
}
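/*
 * shrink the item at path->slots[0] to new_size bytes.  from_end selects
 * whether the bytes are trimmed from the end or the front of the item data;
 * when trimming from the front the item key's offset is bumped to match.
 */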
int btrfs_truncate_item(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct btrfs_path *path,
			u32 new_size, int from_end)
{
	int ret = 0;
	int slot;
	int slot_orig;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;

	slot_orig = path->slots[0];
	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size_nr(leaf, slot);
	if (old_size == new_size)
		return 0;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	old_data_start = btrfs_item_offset_nr(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);
		if (!leaf->map_token) {
			map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
		}
		ioff = btrfs_item_offset(leaf, item);
		btrfs_set_item_offset(leaf, item, ioff + size_diff);
	}

	if (leaf->map_token) {
		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
		leaf->map_token = NULL;
	}

	/* shift the data */
	if (from_end) {
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
					(unsigned long)fi,
					offsetof(struct btrfs_file_extent_item,
						 disk_bytenr));
			}
		}

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start - data_end);

		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, new_size);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return ret;
}
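/*
 * grow the item at path->slots[0] by data_size bytes.  the new bytes are
 * added at the end of the item data; the caller must have made sure the
 * leaf has enough free space.
 */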
int btrfs_extend_item(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, struct btrfs_path *path,
		      u32 data_size)
{
	int ret = 0;
	int slot;
	int slot_orig;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;

	slot_orig = path->slots[0];
	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < data_size) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_end_nr(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(root, leaf);
		printk("slot %d too large, nritems %d\n", slot, nritems);
		BUG_ON(1);
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);
		if (!leaf->map_token) {
			map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
		}
		ioff = btrfs_item_offset(leaf, item);
		btrfs_set_item_offset(leaf, item, ioff - data_size);
	}

	if (leaf->map_token) {
		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
		leaf->map_token = NULL;
	}

	/* shift the data */
	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
		      data_end - data_size, btrfs_leaf_data(leaf) +
		      data_end, old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size_nr(leaf, slot);
	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, old_size + data_size);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return ret;
}
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 data_size)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int ret = 0;
	int slot;
	int slot_orig;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;

	btrfs_cpu_key_to_disk(&disk_key, cpu_key);

	/* create a root if there isn't one */
	if (!root->node)
		BUG();
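	/* find the slot for the new key; a return of 0 means it already exists */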
	ret = btrfs_search_slot(trans, root, cpu_key, path, data_size, 1);
	if (ret == 0) {
		return -EEXIST;
	}
	if (ret < 0)
		goto out;

	slot_orig = path->slots[0];
	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) <
	    sizeof(struct btrfs_item) + data_size) {
		btrfs_print_leaf(root, leaf);
		printk("not enough freespace need %u have %d\n",
		       data_size, btrfs_leaf_free_space(root, leaf));
		BUG();
	}

	slot = path->slots[0];
	BUG_ON(slot < 0);

	if (slot != nritems) {
		int i;
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk("slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		WARN_ON(leaf->map_token);
		for (i = slot; i < nritems; i++) {
			u32 ioff;
			item = btrfs_item_nr(leaf, i);
			if (!leaf->map_token) {
				map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
			}

			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff - data_size);
		}
		if (leaf->map_token) {
			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
			leaf->map_token = NULL;
		}

		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - data_size, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	btrfs_set_item_key(leaf, &disk_key, slot);
	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_offset(leaf, item, data_end - data_size);
	btrfs_set_item_size(leaf, item, data_size);
	btrfs_set_header_nritems(leaf, nritems + 1);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (slot == 0)
		ret = fixup_low_keys(trans, root, path, &disk_key, 1);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
out:
	return ret;
}
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed,
 * and copies the data into the newly created item.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *cpu_key, void *data, u32
		      data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}
/*
 * delete the pointer from a given node.
 *
 * If the delete empties a node, the node is removed from the tree,
 * continuing all the way to the root if required.  The root is converted
 * into a leaf if all the nodes are emptied.
 */
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret = 0;
	int wret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	}
	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
		if (wret)
			ret = wret;
	}
	btrfs_mark_buffer_dirty(parent);
	return ret;
}
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int doff;
	int dsize;
	int ret = 0;
	int wret;
	u32 nritems;

	leaf = path->nodes[0];
	slot = path->slots[0];
	doff = btrfs_item_offset_nr(leaf, slot);
	dsize = btrfs_item_size_nr(leaf, slot);
	nritems = btrfs_header_nritems(leaf);
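	/*
	 * unless this is the last item, close the gap: slide the later
	 * items' data up by dsize and move their item headers over the
	 * deleted slot
	 */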
	if (slot != nritems - 1) {
		int i;
		int data_end = leaf_data_end(root, leaf);

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + dsize,
			      btrfs_leaf_data(leaf) + data_end,
			      doff - data_end);

		for (i = slot + 1; i < nritems; i++) {
			u32 ioff;
			item = btrfs_item_nr(leaf, i);
			if (!leaf->map_token) {
				map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
			}
			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff + dsize);
		}

		if (leaf->map_token) {
			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
			leaf->map_token = NULL;
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + 1),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - 1));
	}
	btrfs_set_header_nritems(leaf, nritems - 1);
	nritems--;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			clean_tree_block(trans, root, leaf);
			wait_on_tree_block_writeback(root, leaf);
			wret = del_ptr(trans, root, path, 1, path->slots[1]);
			if (wret)
				ret = wret;
			wret = btrfs_free_extent(trans, root,
						 leaf->start, leaf->len, 1);
			if (wret)
				ret = wret;
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			wret = fixup_low_keys(trans, root, path,
					      &disk_key, 1);
			if (wret)
				ret = wret;
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			wret = push_leaf_right(trans, root, path, 1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_left(trans, root, path, 1);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				u64 bytenr = leaf->start;
				u32 blocksize = leaf->len;

				clean_tree_block(trans, root, leaf);
				wait_on_tree_block_writeback(root, leaf);

				wret = del_ptr(trans, root, path, 1, slot);
				if (wret)
					ret = wret;

				free_extent_buffer(leaf);
				wret = btrfs_free_extent(trans, root, bytenr,
							 blocksize, 1);
				if (wret)
					ret = wret;
			} else {
				btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
/*
 * walk up the tree as far as required to find the next leaf.
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	int slot;
	int level = 1;
	u64 bytenr;
	struct extent_buffer *c;
	struct extent_buffer *next = NULL;
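	/* walk up until we find a node with a pointer to the right of our slot */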
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			continue;
		}

		bytenr = btrfs_node_blockptr(c, slot);
		if (next)
			free_extent_buffer(next);

		if (path->reada)
			reada_for_search(root, path, level, slot);

		next = read_tree_block(root, bytenr,
				       btrfs_level_size(root, level - 1));
		break;
	}
	path->slots[level] = slot;
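	/* walk back down along the left edge of the new subtree to its first leaf */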
	while (1) {
		level--;
		c = path->nodes[level];
		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!level)
			break;
		if (path->reada)
			reada_for_search(root, path, level, 0);
		next = read_tree_block(root, btrfs_node_blockptr(next, 0),
				       btrfs_level_size(root, level - 1));
	}
	return 0;
}