disk-io.c

/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/crc32c.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"

u64 bh_blocknr(struct buffer_head *bh)
{
        return bh->b_blocknr;
}
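
/*
 * Sanity check: the block number stored in the btree header must match
 * the location this buffer was actually read from.
 */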
static int check_tree_block(struct btrfs_root *root, struct buffer_head *buf)
{
        struct btrfs_node *node = btrfs_buffer_node(buf);
        if (bh_blocknr(buf) != btrfs_header_blocknr(&node->header)) {
                printk(KERN_CRIT "bh_blocknr(buf) is %llu, header is %llu\n",
                       (unsigned long long)bh_blocknr(buf),
                       (unsigned long long)btrfs_header_blocknr(&node->header));
                return 1;
        }
        return 0;
}
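
/*
 * Look up a tree block in the btree inode's page cache.  Returns the
 * buffer_head with an extra reference if it is already cached, or NULL
 * without touching the disk.
 */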
struct buffer_head *btrfs_find_tree_block(struct btrfs_root *root, u64 blocknr)
{
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        int blockbits = root->fs_info->sb->s_blocksize_bits;
        unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
        struct page *page;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct buffer_head *ret = NULL;

        page = find_lock_page(mapping, index);
        if (!page)
                return NULL;

        if (!page_has_buffers(page))
                goto out_unlock;

        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_mapped(bh) && bh_blocknr(bh) == blocknr) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
        } while (bh != head);
out_unlock:
        unlock_page(page);
        page_cache_release(page);
        return ret;
}

int btrfs_map_bh_to_logical(struct btrfs_root *root, struct buffer_head *bh,
                            u64 logical)
{
        if (logical == 0) {
                bh->b_bdev = NULL;
                bh->b_blocknr = 0;
                set_buffer_mapped(bh);
        } else {
                map_bh(bh, root->fs_info->sb, logical);
        }
        return 0;
}
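
/*
 * Find a tree block in the page cache, creating the page and its
 * buffer_heads if necessary.  New buffers are mapped to their logical
 * block numbers; the matching buffer is returned with a reference held.
 */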
struct buffer_head *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                 u64 blocknr)
{
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        int blockbits = root->fs_info->sb->s_blocksize_bits;
        unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
        struct page *page;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct buffer_head *ret = NULL;
        int err;
        u64 first_block = index << (PAGE_CACHE_SHIFT - blockbits);

        page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page)
                return NULL;

        if (!page_has_buffers(page))
                create_empty_buffers(page, root->fs_info->sb->s_blocksize, 0);
        head = page_buffers(page);
        bh = head;
        do {
                if (!buffer_mapped(bh)) {
                        err = btrfs_map_bh_to_logical(root, bh, first_block);
                        BUG_ON(err);
                }
                if (bh_blocknr(bh) == blocknr) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
                first_block++;
        } while (bh != head);
out_unlock:
        unlock_page(page);
        if (ret)
                touch_buffer(ret);
        page_cache_release(page);
        return ret;
}

static int btree_get_block(struct inode *inode, sector_t iblock,
                           struct buffer_head *bh, int create)
{
        int err;
        struct btrfs_root *root = BTRFS_I(bh->b_page->mapping->host)->root;
        err = btrfs_map_bh_to_logical(root, bh, iblock);
        return err;
}

int btrfs_csum_data(struct btrfs_root *root, char *data, size_t len,
                    char *result)
{
        u32 crc;
        crc = crc32c(0, data, len);
        memcpy(result, &crc, BTRFS_CRC32_SIZE);
        return 0;
}
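
/*
 * Compute the crc32c of everything past the checksum field.  With verify
 * set, compare against the checksum stored in the block; otherwise write
 * the new checksum into the block header.
 */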
static int csum_tree_block(struct btrfs_root *root, struct buffer_head *bh,
                           int verify)
{
        char result[BTRFS_CRC32_SIZE];
        int ret;
        struct btrfs_node *node;

        ret = btrfs_csum_data(root, bh->b_data + BTRFS_CSUM_SIZE,
                              bh->b_size - BTRFS_CSUM_SIZE, result);
        if (ret)
                return ret;
        if (verify) {
                if (memcmp(bh->b_data, result, BTRFS_CRC32_SIZE)) {
                        printk("btrfs: %s checksum verify failed on %llu\n",
                               root->fs_info->sb->s_id,
                               (unsigned long long)bh_blocknr(bh));
                        return 1;
                }
        } else {
                node = btrfs_buffer_node(bh);
                memcpy(node->header.csum, result, BTRFS_CRC32_SIZE);
        }
        return 0;
}
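
/*
 * Checksum every dirty buffer on the page before handing it to
 * block_write_full_page() for the actual IO.
 */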
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
        struct buffer_head *bh;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        struct buffer_head *head;

        if (!page_has_buffers(page)) {
                create_empty_buffers(page, root->fs_info->sb->s_blocksize,
                                     (1 << BH_Dirty) | (1 << BH_Uptodate));
        }
        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_dirty(bh))
                        csum_tree_block(root, bh, 0);
                bh = bh->b_this_page;
        } while (bh != head);
        return block_write_full_page(page, btree_get_block, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, btree_get_block);
}

static struct address_space_operations btree_aops = {
        .readpage       = btree_readpage,
        .writepage      = btree_writepage,
        .sync_page      = block_sync_page,
};
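
/*
 * Start an asynchronous read of a tree block so that a later
 * read_tree_block() call is less likely to block.
 */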
int readahead_tree_block(struct btrfs_root *root, u64 blocknr)
{
        struct buffer_head *bh = NULL;
        int ret = 0;

        bh = btrfs_find_create_tree_block(root, blocknr);
        if (!bh)
                return 0;
        if (buffer_uptodate(bh)) {
                ret = 1;
                goto done;
        }
        if (test_set_buffer_locked(bh)) {
                ret = 1;
                goto done;
        }
        if (!buffer_uptodate(bh)) {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
        } else {
                unlock_buffer(bh);
                ret = 1;
        }
done:
        brelse(bh);
        return ret;
}
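
/*
 * Read a tree block synchronously, verifying its checksum and header
 * block number the first time it is seen.  Returns NULL on any failure.
 */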
struct buffer_head *read_tree_block(struct btrfs_root *root, u64 blocknr)
{
        struct buffer_head *bh = NULL;

        bh = btrfs_find_create_tree_block(root, blocknr);
        if (!bh)
                return bh;
        if (buffer_uptodate(bh))
                goto uptodate;
        lock_buffer(bh);
        if (!buffer_uptodate(bh)) {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        goto fail;
        } else {
                unlock_buffer(bh);
        }
uptodate:
        if (!buffer_checked(bh)) {
                csum_tree_block(root, bh, 1);
                set_buffer_checked(bh);
        }
        if (check_tree_block(root, bh))
                goto fail;
        return bh;
fail:
        brelse(bh);
        return NULL;
}

int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct buffer_head *buf)
{
        WARN_ON(atomic_read(&buf->b_count) == 0);
        clear_buffer_dirty(buf);
        return 0;
}
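
/*
 * Initialize the in-memory fields of a btrfs_root before its on-disk
 * root item and tree node are read in.
 */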
static int __setup_root(int blocksize,
                        struct btrfs_root *root,
                        struct btrfs_fs_info *fs_info,
                        u64 objectid)
{
        root->node = NULL;
        root->inode = NULL;
        root->commit_root = NULL;
        root->blocksize = blocksize;
        root->ref_cows = 0;
        root->fs_info = fs_info;
        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_inode = 0;
        root->last_inode_alloc = 0;
        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        root->root_key.objectid = objectid;
        return 0;
}

static int find_and_setup_root(int blocksize,
                               struct btrfs_root *tree_root,
                               struct btrfs_fs_info *fs_info,
                               u64 objectid,
                               struct btrfs_root *root)
{
        int ret;

        __setup_root(blocksize, root, fs_info, objectid);
        ret = btrfs_find_last_root(tree_root, objectid,
                                   &root->root_item, &root->root_key);
        BUG_ON(ret);

        root->node = read_tree_block(root,
                                     btrfs_root_blocknr(&root->root_item));
        BUG_ON(!root->node);
        return 0;
}
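
/*
 * Read a subvolume root from the tree of tree roots.  A key offset of -1
 * means the most recent root item for the objectid is used; otherwise the
 * exact key is looked up.  The caller is expected to add the result to
 * the fs_roots radix tree.
 */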
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_fs_info *fs_info,
                                               struct btrfs_key *location)
{
        struct btrfs_root *root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_path *path;
        struct btrfs_leaf *l;
        u64 highest_inode;
        int ret = 0;

        root = kzalloc(sizeof(*root), GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);
        if (location->offset == (u64)-1) {
                ret = find_and_setup_root(fs_info->sb->s_blocksize,
                                          fs_info->tree_root, fs_info,
                                          location->objectid, root);
                if (ret) {
                        kfree(root);
                        return ERR_PTR(ret);
                }
                goto insert;
        }

        __setup_root(fs_info->sb->s_blocksize, root, fs_info,
                     location->objectid);

        path = btrfs_alloc_path();
        BUG_ON(!path);
        ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
        if (ret != 0) {
                if (ret > 0)
                        ret = -ENOENT;
                goto out;
        }
        l = btrfs_buffer_leaf(path->nodes[0]);
        memcpy(&root->root_item,
               btrfs_item_ptr(l, path->slots[0], struct btrfs_root_item),
               sizeof(root->root_item));
        memcpy(&root->root_key, location, sizeof(*location));
        ret = 0;
out:
        btrfs_release_path(root, path);
        btrfs_free_path(path);
        if (ret) {
                kfree(root);
                return ERR_PTR(ret);
        }
        root->node = read_tree_block(root,
                                     btrfs_root_blocknr(&root->root_item));
        BUG_ON(!root->node);
insert:
        root->ref_cows = 1;
        ret = btrfs_find_highest_inode(root, &highest_inode);
        if (ret == 0) {
                root->highest_inode = highest_inode;
                root->last_inode_alloc = highest_inode;
        }
        return root;
}
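
/*
 * Return the in-memory root for a key, reading it from disk and caching
 * it in the fs_roots radix tree on the first lookup.
 */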
struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
                                      struct btrfs_key *location)
{
        struct btrfs_root *root;
        int ret;

        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)location->objectid);
        if (root)
                return root;
        root = btrfs_read_fs_root_no_radix(fs_info, location);
        if (IS_ERR(root))
                return root;
        ret = radix_tree_insert(&fs_info->fs_roots_radix,
                                (unsigned long)root->root_key.objectid,
                                root);
        if (ret) {
                brelse(root->node);
                kfree(root);
                return ERR_PTR(ret);
        }
        return root;
}
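
/*
 * Mount-time setup: allocate the fs_info and in-core roots, create the
 * btree inode that backs the metadata page cache, read the super block,
 * and load the tree root and extent root from disk.
 */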
struct btrfs_root *open_ctree(struct super_block *sb)
{
        struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
                                                 GFP_NOFS);
        struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
                                               GFP_NOFS);
        struct btrfs_fs_info *fs_info = kmalloc(sizeof(*fs_info),
                                                GFP_NOFS);
        int ret;
        int err = -EIO;
        struct btrfs_super_block *disk_super;

        if (!extent_root || !tree_root || !fs_info) {
                err = -ENOMEM;
                goto fail;
        }
        init_bit_radix(&fs_info->pinned_radix);
        init_bit_radix(&fs_info->pending_del_radix);
        init_bit_radix(&fs_info->extent_map_radix);
        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
        INIT_RADIX_TREE(&fs_info->block_group_radix, GFP_KERNEL);
        INIT_RADIX_TREE(&fs_info->block_group_data_radix, GFP_KERNEL);
        INIT_LIST_HEAD(&fs_info->trans_list);
        INIT_LIST_HEAD(&fs_info->dead_roots);
        sb_set_blocksize(sb, 4096);
        fs_info->running_transaction = NULL;
        fs_info->tree_root = tree_root;
        fs_info->extent_root = extent_root;
        fs_info->sb = sb;
        fs_info->btree_inode = new_inode(sb);
        fs_info->btree_inode->i_ino = 1;
        fs_info->btree_inode->i_nlink = 1;
        fs_info->btree_inode->i_size = sb->s_bdev->bd_inode->i_size;
        fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
        fs_info->do_barriers = 1;
        fs_info->extent_tree_insert_nr = 0;
        fs_info->extent_tree_prealloc_nr = 0;
        fs_info->closing = 0;
        INIT_DELAYED_WORK(&fs_info->trans_work, btrfs_transaction_cleaner);
        BTRFS_I(fs_info->btree_inode)->root = tree_root;
        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
               sizeof(struct btrfs_key));
        insert_inode_hash(fs_info->btree_inode);
        mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);

        mutex_init(&fs_info->trans_mutex);
        mutex_init(&fs_info->fs_mutex);

        __setup_root(sb->s_blocksize, tree_root,
                     fs_info, BTRFS_ROOT_TREE_OBJECTID);
        fs_info->sb_buffer = read_tree_block(tree_root,
                                             BTRFS_SUPER_INFO_OFFSET /
                                             sb->s_blocksize);

        if (!fs_info->sb_buffer)
                goto fail_iput;

        disk_super = (struct btrfs_super_block *)fs_info->sb_buffer->b_data;
        fs_info->disk_super = disk_super;
        memcpy(&fs_info->super_copy, disk_super, sizeof(fs_info->super_copy));

        if (!btrfs_super_root(disk_super))
                goto fail_sb_buffer;

        i_size_write(fs_info->btree_inode,
                     btrfs_super_total_blocks(disk_super) <<
                     fs_info->btree_inode->i_blkbits);

        if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
                    sizeof(disk_super->magic))) {
                printk("btrfs: valid FS not found on %s\n", sb->s_id);
                goto fail_sb_buffer;
        }
        tree_root->node = read_tree_block(tree_root,
                                          btrfs_super_root(disk_super));
        if (!tree_root->node)
                goto fail_sb_buffer;

        mutex_lock(&fs_info->fs_mutex);
        ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
                                  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
        if (ret) {
                mutex_unlock(&fs_info->fs_mutex);
                goto fail_tree_root;
        }

        btrfs_read_block_groups(extent_root);

        fs_info->generation = btrfs_super_generation(disk_super) + 1;
        ret = btrfs_find_dead_roots(tree_root);
        if (ret)
                goto fail_tree_root;
        mutex_unlock(&fs_info->fs_mutex);
        return tree_root;

fail_tree_root:
        btrfs_block_release(tree_root, tree_root->node);
fail_sb_buffer:
        btrfs_block_release(tree_root, fs_info->sb_buffer);
fail_iput:
        iput(fs_info->btree_inode);
fail:
        kfree(extent_root);
        kfree(tree_root);
        kfree(fs_info);
        return ERR_PTR(err);
}
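
/*
 * Checksum and write the super block buffer, preferring a barrier write
 * and falling back to a plain write if the device does not support
 * barriers.
 */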
int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root)
{
        int ret;
        struct buffer_head *bh = root->fs_info->sb_buffer;

        lock_buffer(bh);
        WARN_ON(atomic_read(&bh->b_count) < 1);
        clear_buffer_dirty(bh);
        csum_tree_block(root, bh, 0);
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
        if (root->fs_info->do_barriers)
                ret = submit_bh(WRITE_BARRIER, bh);
        else
                ret = submit_bh(WRITE, bh);
        if (ret == -EOPNOTSUPP) {
                get_bh(bh);
                lock_buffer(bh);
                set_buffer_uptodate(bh);
                root->fs_info->do_barriers = 0;
                ret = submit_bh(WRITE, bh);
        }
        wait_on_buffer(bh);
        if (!buffer_uptodate(bh)) {
                WARN_ON(1);
                return -EIO;
        }
        return 0;
}

int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
        radix_tree_delete(&fs_info->fs_roots_radix,
                          (unsigned long)root->root_key.objectid);
        if (root->inode)
                iput(root->inode);
        if (root->node)
                brelse(root->node);
        if (root->commit_root)
                brelse(root->commit_root);
        kfree(root);
        return 0;
}

static int del_fs_roots(struct btrfs_fs_info *fs_info)
{
        int ret;
        struct btrfs_root *gang[8];
        int i;

        while (1) {
                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
                                             (void **)gang, 0,
                                             ARRAY_SIZE(gang));
                if (!ret)
                        break;
                for (i = 0; i < ret; i++)
                        btrfs_free_fs_root(fs_info, gang[i]);
        }
        return 0;
}
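
/*
 * Unmount: commit the running transaction (twice, to drop the original
 * snapshot), write the super block, and release every cached root and
 * the btree inode.
 */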
int close_ctree(struct btrfs_root *root)
{
        int ret;
        struct btrfs_trans_handle *trans;
        struct btrfs_fs_info *fs_info = root->fs_info;

        fs_info->closing = 1;
        btrfs_transaction_flush_work(root);
        mutex_lock(&fs_info->fs_mutex);
        trans = btrfs_start_transaction(root, 1);
        ret = btrfs_commit_transaction(trans, root);
        /* run commit again to drop the original snapshot */
        trans = btrfs_start_transaction(root, 1);
        btrfs_commit_transaction(trans, root);
        ret = btrfs_write_and_wait_transaction(NULL, root);
        BUG_ON(ret);
        write_ctree_super(NULL, root);
        mutex_unlock(&fs_info->fs_mutex);

        if (fs_info->extent_root->node)
                btrfs_block_release(fs_info->extent_root,
                                    fs_info->extent_root->node);
        if (fs_info->tree_root->node)
                btrfs_block_release(fs_info->tree_root,
                                    fs_info->tree_root->node);
        btrfs_block_release(root, fs_info->sb_buffer);
        truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
        iput(fs_info->btree_inode);

        btrfs_free_block_groups(root->fs_info);
        del_fs_roots(fs_info);
        kfree(fs_info->extent_root);
        kfree(fs_info->tree_root);
        return 0;
}
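
/*
 * Mark a metadata buffer dirty, warning if its header generation does not
 * match the currently running transaction.
 */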
void btrfs_mark_buffer_dirty(struct buffer_head *bh)
{
        struct btrfs_root *root = BTRFS_I(bh->b_page->mapping->host)->root;
        u64 transid = btrfs_header_generation(btrfs_buffer_header(bh));

        WARN_ON(!atomic_read(&bh->b_count));

        if (transid != root->fs_info->generation) {
                printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n",
                       (unsigned long long)bh->b_blocknr,
                       transid, root->fs_info->generation);
                WARN_ON(1);
        }
        mark_buffer_dirty(bh);
}

void btrfs_block_release(struct btrfs_root *root, struct buffer_head *buf)
{
        brelse(buf);
}

void btrfs_btree_balance_dirty(struct btrfs_root *root)
{
        balance_dirty_pages_ratelimited(root->fs_info->btree_inode->i_mapping);
}