/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/crc32c.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"

u64 bh_blocknr(struct buffer_head *bh)
{
        return bh->b_blocknr;
}
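
/*
 * Sanity check: the block number recorded in the node header must match
 * the buffer's block number on disk.  Returns 1 on mismatch, 0 if sane.
 */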
static int check_tree_block(struct btrfs_root *root, struct buffer_head *buf)
{
        struct btrfs_node *node = btrfs_buffer_node(buf);

        if (bh_blocknr(buf) != btrfs_header_blocknr(&node->header)) {
                printk(KERN_CRIT "bh_blocknr(buf) is %llu, header is %llu\n",
                       (unsigned long long)bh_blocknr(buf),
                       (unsigned long long)btrfs_header_blocknr(&node->header));
                return 1;
        }
        return 0;
}
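
/*
 * Look up a tree block in the btree inode's page cache without reading
 * it from disk.  Returns a referenced buffer_head, or NULL if the block
 * is not cached.
 */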
struct buffer_head *btrfs_find_tree_block(struct btrfs_root *root, u64 blocknr)
{
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        int blockbits = root->fs_info->sb->s_blocksize_bits;
        unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
        struct page *page;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct buffer_head *ret = NULL;

        page = find_lock_page(mapping, index);
        if (!page)
                return NULL;

        if (!page_has_buffers(page))
                goto out_unlock;

        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_mapped(bh) && bh_blocknr(bh) == blocknr) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
        } while (bh != head);
out_unlock:
        unlock_page(page);
        page_cache_release(page);
        return ret;
}

int btrfs_map_bh_to_logical(struct btrfs_root *root, struct buffer_head *bh,
                            u64 logical)
{
        if (logical == 0) {
                bh->b_bdev = NULL;
                bh->b_blocknr = 0;
                set_buffer_mapped(bh);
        } else {
                map_bh(bh, root->fs_info->sb, logical);
        }
        return 0;
}
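
/*
 * Find the page that backs @blocknr in the btree inode, creating it (and
 * its buffers) if needed, and return a referenced buffer_head for the
 * block.  The buffers are mapped 1:1 to logical block numbers.
 */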
struct buffer_head *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                 u64 blocknr)
{
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        int blockbits = root->fs_info->sb->s_blocksize_bits;
        unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
        struct page *page;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct buffer_head *ret = NULL;
        int err;
        u64 first_block = index << (PAGE_CACHE_SHIFT - blockbits);

        page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page)
                return NULL;

        if (!page_has_buffers(page))
                create_empty_buffers(page, root->fs_info->sb->s_blocksize, 0);
        head = page_buffers(page);
        bh = head;
        do {
                if (!buffer_mapped(bh)) {
                        err = btrfs_map_bh_to_logical(root, bh, first_block);
                        BUG_ON(err);
                }
                if (bh_blocknr(bh) == blocknr) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
                first_block++;
        } while (bh != head);
out_unlock:
        unlock_page(page);
        if (ret)
                touch_buffer(ret);
        page_cache_release(page);
        return ret;
}

static int btree_get_block(struct inode *inode, sector_t iblock,
                           struct buffer_head *bh, int create)
{
        int err;
        struct btrfs_root *root = BTRFS_I(bh->b_page->mapping->host)->root;

        err = btrfs_map_bh_to_logical(root, bh, iblock);
        return err;
}

int btrfs_csum_data(struct btrfs_root *root, char *data, size_t len,
                    char *result)
{
        u32 crc;

        crc = crc32c(0, data, len);
        memcpy(result, &crc, BTRFS_CRC32_SIZE);
        return 0;
}
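
/*
 * Checksum everything in the block after the csum field.  With @verify
 * set, the result is compared against the csum stored at the start of
 * the block; otherwise the freshly computed csum is written into the
 * node header.
 */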
static int csum_tree_block(struct btrfs_root *root, struct buffer_head *bh,
                           int verify)
{
        char result[BTRFS_CRC32_SIZE];
        int ret;
        struct btrfs_node *node;

        ret = btrfs_csum_data(root, bh->b_data + BTRFS_CSUM_SIZE,
                              bh->b_size - BTRFS_CSUM_SIZE, result);
        if (ret)
                return ret;

        if (verify) {
                if (memcmp(bh->b_data, result, BTRFS_CRC32_SIZE)) {
                        printk("btrfs: %s checksum verify failed on %llu\n",
                               root->fs_info->sb->s_id,
                               (unsigned long long)bh_blocknr(bh));
                        return 1;
                }
        } else {
                node = btrfs_buffer_node(bh);
                memcpy(node->header.csum, result, BTRFS_CRC32_SIZE);
        }
        return 0;
}
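
/*
 * Before handing a btree page to block_write_full_page(), recompute the
 * checksum of every dirty buffer on it so the blocks hit disk with a
 * valid csum in their headers.
 */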
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
        struct buffer_head *bh;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        struct buffer_head *head;

        if (!page_has_buffers(page)) {
                create_empty_buffers(page, root->fs_info->sb->s_blocksize,
                                     (1 << BH_Dirty) | (1 << BH_Uptodate));
        }
        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_dirty(bh))
                        csum_tree_block(root, bh, 0);
                bh = bh->b_this_page;
        } while (bh != head);
        return block_write_full_page(page, btree_get_block, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, btree_get_block);
}

static struct address_space_operations btree_aops = {
        .readpage       = btree_readpage,
        .writepage      = btree_writepage,
        .sync_page      = block_sync_page,
};

int readahead_tree_block(struct btrfs_root *root, u64 blocknr)
{
        struct buffer_head *bh = NULL;
        int ret = 0;

        bh = btrfs_find_create_tree_block(root, blocknr);
        if (!bh)
                return 0;
        if (buffer_uptodate(bh)) {
                ret = 1;
                goto done;
        }
        if (test_set_buffer_locked(bh)) {
                ret = 1;
                goto done;
        }
        if (!buffer_uptodate(bh)) {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
        } else {
                unlock_buffer(bh);
                ret = 1;
        }
done:
        brelse(bh);
        return ret;
}
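
/*
 * Read a tree block and wait for it.  The checksum is verified once per
 * buffer (tracked with the checked buffer bit) and the header block
 * number is sanity checked.  Returns NULL on any failure.
 */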
struct buffer_head *read_tree_block(struct btrfs_root *root, u64 blocknr)
{
        struct buffer_head *bh = NULL;

        bh = btrfs_find_create_tree_block(root, blocknr);
        if (!bh)
                return bh;
        if (buffer_uptodate(bh))
                goto uptodate;

        lock_buffer(bh);
        if (!buffer_uptodate(bh)) {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        goto fail;
        } else {
                unlock_buffer(bh);
        }
uptodate:
        if (!buffer_checked(bh)) {
                csum_tree_block(root, bh, 1);
                set_buffer_checked(bh);
        }
        if (check_tree_block(root, bh))
                goto fail;
        return bh;
fail:
        brelse(bh);
        return NULL;
}

int dirty_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct buffer_head *buf)
{
        WARN_ON(atomic_read(&buf->b_count) == 0);
        mark_buffer_dirty(buf);
        return 0;
}

int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct buffer_head *buf)
{
        WARN_ON(atomic_read(&buf->b_count) == 0);
        clear_buffer_dirty(buf);
        return 0;
}

static int __setup_root(int blocksize,
                        struct btrfs_root *root,
                        struct btrfs_fs_info *fs_info,
                        u64 objectid)
{
        root->node = NULL;
        root->inode = NULL;
        root->commit_root = NULL;
        root->blocksize = blocksize;
        root->ref_cows = 0;
        root->fs_info = fs_info;
        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_inode = 0;
        root->last_inode_alloc = 0;
        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        root->root_key.objectid = objectid;
        return 0;
}

static int find_and_setup_root(int blocksize,
                               struct btrfs_root *tree_root,
                               struct btrfs_fs_info *fs_info,
                               u64 objectid,
                               struct btrfs_root *root)
{
        int ret;

        __setup_root(blocksize, root, fs_info, objectid);
        ret = btrfs_find_last_root(tree_root, objectid,
                                   &root->root_item, &root->root_key);
        BUG_ON(ret);

        root->node = read_tree_block(root,
                                     btrfs_root_blocknr(&root->root_item));
        BUG_ON(!root->node);
        return 0;
}
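
/*
 * Look up a subvolume root by key.  Cached roots are returned from the
 * fs_roots radix tree; otherwise the root item is read from the tree of
 * tree roots, the root node is read in and the new root is inserted
 * into the radix tree.
 */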
struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
                                      struct btrfs_key *location)
{
        struct btrfs_root *root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_path *path;
        struct btrfs_leaf *l;
        u64 highest_inode;
        int ret = 0;

        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)location->objectid);
        if (root)
                return root;

        root = kmalloc(sizeof(*root), GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);
        if (location->offset == (u64)-1) {
                ret = find_and_setup_root(fs_info->sb->s_blocksize,
                                          fs_info->tree_root, fs_info,
                                          location->objectid, root);
                if (ret) {
                        kfree(root);
                        return ERR_PTR(ret);
                }
                goto insert;
        }

        __setup_root(fs_info->sb->s_blocksize, root, fs_info,
                     location->objectid);

        path = btrfs_alloc_path();
        BUG_ON(!path);
        ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
        if (ret != 0) {
                if (ret > 0)
                        ret = -ENOENT;
                goto out;
        }
        l = btrfs_buffer_leaf(path->nodes[0]);
        memcpy(&root->root_item,
               btrfs_item_ptr(l, path->slots[0], struct btrfs_root_item),
               sizeof(root->root_item));
        memcpy(&root->root_key, location, sizeof(*location));
        ret = 0;
out:
        btrfs_release_path(root, path);
        btrfs_free_path(path);
        if (ret) {
                kfree(root);
                return ERR_PTR(ret);
        }
        root->node = read_tree_block(root,
                                     btrfs_root_blocknr(&root->root_item));
        BUG_ON(!root->node);
insert:
        root->ref_cows = 1;
        ret = radix_tree_insert(&fs_info->fs_roots_radix,
                                (unsigned long)root->root_key.objectid,
                                root);
        if (ret) {
                brelse(root->node);
                kfree(root);
                return ERR_PTR(ret);
        }
        ret = btrfs_find_highest_inode(root, &highest_inode);
        if (ret == 0) {
                root->highest_inode = highest_inode;
                root->last_inode_alloc = highest_inode;
        }
        return root;
}
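
/*
 * Read the super block and bring up the tree root and extent root,
 * along with the in-memory fs_info structure and the btree inode that
 * backs all metadata pages.
 */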
struct btrfs_root *open_ctree(struct super_block *sb)
{
        struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
                                                 GFP_NOFS);
        struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
                                               GFP_NOFS);
        struct btrfs_fs_info *fs_info = kmalloc(sizeof(*fs_info),
                                                GFP_NOFS);
        int ret;
        int err = -EIO;
        struct btrfs_super_block *disk_super;

        if (!extent_root || !tree_root || !fs_info) {
                err = -ENOMEM;
                goto fail;
        }
        init_bit_radix(&fs_info->pinned_radix);
        init_bit_radix(&fs_info->pending_del_radix);
        init_bit_radix(&fs_info->extent_map_radix);
        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
        INIT_RADIX_TREE(&fs_info->block_group_radix, GFP_KERNEL);
        INIT_RADIX_TREE(&fs_info->block_group_data_radix, GFP_KERNEL);
        INIT_LIST_HEAD(&fs_info->trans_list);
        INIT_LIST_HEAD(&fs_info->dead_roots);
        sb_set_blocksize(sb, 4096);
        fs_info->running_transaction = NULL;
        fs_info->tree_root = tree_root;
        fs_info->extent_root = extent_root;
        fs_info->sb = sb;
        fs_info->btree_inode = new_inode(sb);
        fs_info->btree_inode->i_ino = 1;
        fs_info->btree_inode->i_nlink = 1;
        fs_info->btree_inode->i_size = sb->s_bdev->bd_inode->i_size;
        fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
        fs_info->do_barriers = 1;
        fs_info->extent_tree_insert_nr = 0;
        fs_info->extent_tree_prealloc_nr = 0;
        fs_info->closing = 0;
        INIT_DELAYED_WORK(&fs_info->trans_work, btrfs_transaction_cleaner);
        BTRFS_I(fs_info->btree_inode)->root = tree_root;
        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
               sizeof(struct btrfs_key));
        insert_inode_hash(fs_info->btree_inode);
        mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);

        mutex_init(&fs_info->trans_mutex);
        mutex_init(&fs_info->fs_mutex);

        __setup_root(sb->s_blocksize, tree_root,
                     fs_info, BTRFS_ROOT_TREE_OBJECTID);
        fs_info->sb_buffer = read_tree_block(tree_root,
                                             BTRFS_SUPER_INFO_OFFSET /
                                             sb->s_blocksize);
        if (!fs_info->sb_buffer)
                goto fail_iput;

        disk_super = (struct btrfs_super_block *)fs_info->sb_buffer->b_data;
        if (!btrfs_super_root(disk_super))
                goto fail_sb_buffer;

        i_size_write(fs_info->btree_inode,
                     btrfs_super_total_blocks(disk_super) <<
                     fs_info->btree_inode->i_blkbits);

        fs_info->disk_super = disk_super;

        if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
                    sizeof(disk_super->magic))) {
                printk("btrfs: valid FS not found on %s\n", sb->s_id);
                goto fail_sb_buffer;
        }
        tree_root->node = read_tree_block(tree_root,
                                          btrfs_super_root(disk_super));
        if (!tree_root->node)
                goto fail_sb_buffer;

        mutex_lock(&fs_info->fs_mutex);
        ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
                                  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
        if (ret) {
                mutex_unlock(&fs_info->fs_mutex);
                goto fail_tree_root;
        }

        btrfs_read_block_groups(extent_root);

        fs_info->generation = btrfs_super_generation(disk_super) + 1;
        mutex_unlock(&fs_info->fs_mutex);
        return tree_root;

fail_tree_root:
        btrfs_block_release(tree_root, tree_root->node);
fail_sb_buffer:
        btrfs_block_release(tree_root, fs_info->sb_buffer);
fail_iput:
        iput(fs_info->btree_inode);
fail:
        kfree(extent_root);
        kfree(tree_root);
        kfree(fs_info);
        return ERR_PTR(err);
}
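
/*
 * Write the super block buffer synchronously, pointing it at the current
 * tree root.  A barrier write is used when do_barriers is set; if the
 * device returns -EOPNOTSUPP, the write is retried without a barrier and
 * barriers are disabled for the rest of the mount.
 */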
int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root)
{
        int ret;
        struct buffer_head *bh = root->fs_info->sb_buffer;

        btrfs_set_super_root(root->fs_info->disk_super,
                             bh_blocknr(root->fs_info->tree_root->node));
        lock_buffer(bh);
        WARN_ON(atomic_read(&bh->b_count) < 1);
        clear_buffer_dirty(bh);
        csum_tree_block(root, bh, 0);
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
        if (root->fs_info->do_barriers)
                ret = submit_bh(WRITE_BARRIER, bh);
        else
                ret = submit_bh(WRITE, bh);
        if (ret == -EOPNOTSUPP) {
                get_bh(bh);
                lock_buffer(bh);
                set_buffer_uptodate(bh);
                root->fs_info->do_barriers = 0;
                ret = submit_bh(WRITE, bh);
        }
        wait_on_buffer(bh);
        if (!buffer_uptodate(bh)) {
                WARN_ON(1);
                return -EIO;
        }
        return 0;
}

static int free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
        radix_tree_delete(&fs_info->fs_roots_radix,
                          (unsigned long)root->root_key.objectid);
        if (root->inode)
                iput(root->inode);
        if (root->node)
                brelse(root->node);
        if (root->commit_root)
                brelse(root->commit_root);
        kfree(root);
        return 0;
}

static int del_fs_roots(struct btrfs_fs_info *fs_info)
{
        int ret;
        struct btrfs_root *gang[8];
        int i;

        while (1) {
                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
                                             (void **)gang, 0,
                                             ARRAY_SIZE(gang));
                if (!ret)
                        break;
                for (i = 0; i < ret; i++)
                        free_fs_root(fs_info, gang[i]);
        }
        return 0;
}
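
/*
 * Tear down the filesystem: commit the final transactions, write the
 * super block, drop the cached roots and release the btree inode.
 */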
int close_ctree(struct btrfs_root *root)
{
        int ret;
        struct btrfs_trans_handle *trans;
        struct btrfs_fs_info *fs_info = root->fs_info;

        fs_info->closing = 1;
        btrfs_transaction_flush_work(root);
        mutex_lock(&fs_info->fs_mutex);
        trans = btrfs_start_transaction(root, 1);
        btrfs_commit_transaction(trans, root);
        /* run commit again to drop the original snapshot */
        trans = btrfs_start_transaction(root, 1);
        btrfs_commit_transaction(trans, root);
        ret = btrfs_write_and_wait_transaction(NULL, root);
        BUG_ON(ret);
        write_ctree_super(NULL, root);
        mutex_unlock(&fs_info->fs_mutex);

        if (fs_info->extent_root->node)
                btrfs_block_release(fs_info->extent_root,
                                    fs_info->extent_root->node);
        if (fs_info->tree_root->node)
                btrfs_block_release(fs_info->tree_root,
                                    fs_info->tree_root->node);
        btrfs_block_release(root, fs_info->sb_buffer);
        truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
        iput(fs_info->btree_inode);

        btrfs_free_block_groups(root->fs_info);
        del_fs_roots(fs_info);
        kfree(fs_info->extent_root);
        kfree(fs_info->tree_root);
        return 0;
}

void btrfs_block_release(struct btrfs_root *root, struct buffer_head *buf)
{
        brelse(buf);
}

void btrfs_btree_balance_dirty(struct btrfs_root *root)
{
        balance_dirty_pages_ratelimited(root->fs_info->btree_inode->i_mapping);
}