disk-io.c

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"

struct dev_lookup {
        u64 block_start;
        u64 num_blocks;
        u64 device_id;
        struct block_device *bdev;
};
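
/*
 * Record a device extent in the dev_radix.  Entries are keyed by the
 * last logical block they cover, so a gang lookup starting at any block
 * inside the extent returns the owning entry.
 */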
int btrfs_insert_dev_radix(struct btrfs_root *root,
                           struct block_device *bdev,
                           u64 device_id,
                           u64 block_start,
                           u64 num_blocks)
{
        struct dev_lookup *lookup;
        int ret;

        lookup = kmalloc(sizeof(*lookup), GFP_NOFS);
        if (!lookup)
                return -ENOMEM;
        lookup->block_start = block_start;
        lookup->num_blocks = num_blocks;
        lookup->bdev = bdev;
        lookup->device_id = device_id;

        ret = radix_tree_insert(&root->fs_info->dev_radix, block_start +
                                num_blocks - 1, lookup);
        return ret;
}
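
/*
 * Return the logical block number of a buffer_head, derived from its
 * page's index plus the buffer's offset within that page.
 */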
u64 bh_blocknr(struct buffer_head *bh)
{
        int blkbits = bh->b_page->mapping->host->i_blkbits;
        u64 blocknr = bh->b_page->index << (PAGE_CACHE_SHIFT - blkbits);
        unsigned long offset;

        if (PageHighMem(bh->b_page))
                offset = (unsigned long)bh->b_data;
        else
                offset = bh->b_data - (char *)page_address(bh->b_page);
        blocknr += offset >> (PAGE_CACHE_SHIFT - blkbits);
        return blocknr;
}

static int check_tree_block(struct btrfs_root *root, struct buffer_head *buf)
{
        struct btrfs_node *node = btrfs_buffer_node(buf);

        if (bh_blocknr(buf) != btrfs_header_blocknr(&node->header)) {
                printk(KERN_CRIT "bh_blocknr(buf) is %Lu, header is %Lu\n",
                       bh_blocknr(buf), btrfs_header_blocknr(&node->header));
                BUG();
        }
        return 0;
}
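
/*
 * Look for an already cached tree block in the btree inode's page
 * cache.  Returns a referenced buffer_head on a hit, NULL otherwise;
 * no I/O is started.
 */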
struct buffer_head *btrfs_find_tree_block(struct btrfs_root *root, u64 blocknr)
{
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        int blockbits = root->fs_info->sb->s_blocksize_bits;
        unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
        struct page *page;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct buffer_head *ret = NULL;

        page = find_lock_page(mapping, index);
        if (!page)
                return NULL;

        if (!page_has_buffers(page))
                goto out_unlock;

        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_mapped(bh) && bh_blocknr(bh) == blocknr) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
        } while (bh != head);
out_unlock:
        unlock_page(page);
        if (ret) {
                touch_buffer(ret);
        }
        page_cache_release(page);
        return ret;
}
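
/*
 * Map a buffer_head at a filesystem-logical block onto the backing
 * device that holds it.  The dev_radix is keyed by the last block of
 * each device extent, so a gang lookup starting at 'logical' finds the
 * covering entry.  Logical block 0 is treated specially: the buffer is
 * marked mapped with no device behind it.
 */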
int btrfs_map_bh_to_logical(struct btrfs_root *root, struct buffer_head *bh,
                            u64 logical)
{
        struct dev_lookup *lookup[2];
        int ret;

        if (logical == 0) {
                bh->b_bdev = NULL;
                bh->b_blocknr = 0;
                set_buffer_mapped(bh);
                return 0;
        }
        root = root->fs_info->dev_root;
        ret = radix_tree_gang_lookup(&root->fs_info->dev_radix,
                                     (void **)lookup,
                                     (unsigned long)logical,
                                     ARRAY_SIZE(lookup));
        if (ret == 0 || lookup[0]->block_start > logical ||
            lookup[0]->block_start + lookup[0]->num_blocks <= logical) {
                ret = -ENOENT;
                goto out;
        }
        bh->b_bdev = lookup[0]->bdev;
        bh->b_blocknr = logical - lookup[0]->block_start;
        set_buffer_mapped(bh);
        ret = 0;
out:
        return ret;
}
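
/*
 * Like btrfs_find_tree_block(), but creates the page and its buffers
 * if they are not in the cache yet, mapping each unmapped buffer to its
 * backing device as the page is walked.
 */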
struct buffer_head *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                 u64 blocknr)
{
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        int blockbits = root->fs_info->sb->s_blocksize_bits;
        unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
        struct page *page;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct buffer_head *ret = NULL;
        int err;
        u64 first_block = index << (PAGE_CACHE_SHIFT - blockbits);

        page = grab_cache_page(mapping, index);
        if (!page)
                return NULL;

        if (!page_has_buffers(page))
                create_empty_buffers(page, root->fs_info->sb->s_blocksize, 0);
        head = page_buffers(page);
        bh = head;
        do {
                if (!buffer_mapped(bh)) {
                        err = btrfs_map_bh_to_logical(root, bh, first_block);
                        BUG_ON(err);
                }
                if (bh_blocknr(bh) == blocknr) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
                first_block++;
        } while (bh != head);
out_unlock:
        unlock_page(page);
        if (ret)
                touch_buffer(ret);
        page_cache_release(page);
        return ret;
}

static int btree_get_block(struct inode *inode, sector_t iblock,
                           struct buffer_head *bh, int create)
{
        int err;
        struct btrfs_root *root = BTRFS_I(bh->b_page->mapping->host)->root;

        err = btrfs_map_bh_to_logical(root, bh, iblock);
        return err;
}
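
/*
 * Checksum a buffer of data with the filesystem's hash transform
 * (sha256 allocated in open_ctree).  The transform is shared, so
 * hash_lock serializes the users.
 */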
int btrfs_csum_data(struct btrfs_root *root, char *data, size_t len,
                    char *result)
{
        struct scatterlist sg;
        struct crypto_hash *tfm = root->fs_info->hash_tfm;
        struct hash_desc desc;
        int ret;

        desc.tfm = tfm;
        desc.flags = 0;
        sg_init_one(&sg, data, len);
        spin_lock(&root->fs_info->hash_lock);
        ret = crypto_hash_digest(&desc, &sg, 1, result);
        spin_unlock(&root->fs_info->hash_lock);
        if (ret) {
                printk("sha256 digest failed\n");
        }
        return ret;
}
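
/*
 * Checksum everything in a tree block except the csum field itself.
 * With 'verify' set, compare against the checksum stored at the start
 * of the block and complain on mismatch; otherwise stamp the freshly
 * computed checksum into the block header.
 */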
static int csum_tree_block(struct btrfs_root *root, struct buffer_head *bh,
                           int verify)
{
        char result[BTRFS_CSUM_SIZE];
        int ret;
        struct btrfs_node *node;

        ret = btrfs_csum_data(root, bh->b_data + BTRFS_CSUM_SIZE,
                              bh->b_size - BTRFS_CSUM_SIZE, result);
        if (ret)
                return ret;
        if (verify) {
                if (memcmp(bh->b_data, result, BTRFS_CSUM_SIZE)) {
                        printk("checksum verify failed on %Lu\n",
                               bh_blocknr(bh));
                        return 1;
                }
        } else {
                node = btrfs_buffer_node(bh);
                memcpy(node->header.csum, result, BTRFS_CSUM_SIZE);
        }
        return 0;
}
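
/*
 * Writepage for the btree inode: recompute the checksum of every dirty
 * buffer on the page before handing it to block_write_full_page().
 */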
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
        struct buffer_head *bh;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        struct buffer_head *head;

        if (!page_has_buffers(page)) {
                create_empty_buffers(page, root->fs_info->sb->s_blocksize,
                                     (1 << BH_Dirty) | (1 << BH_Uptodate));
        }
        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_dirty(bh))
                        csum_tree_block(root, bh, 0);
                bh = bh->b_this_page;
        } while (bh != head);
        return block_write_full_page(page, btree_get_block, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, btree_get_block);
}

static struct address_space_operations btree_aops = {
        .readpage = btree_readpage,
        .writepage = btree_writepage,
        .sync_page = block_sync_page,
};
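
/*
 * Read a tree block through the btree page cache, issuing I/O only if
 * the buffer is not already up to date.  A freshly read block has its
 * checksum verified, and the header block number is sanity checked in
 * either case.
 */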
struct buffer_head *read_tree_block(struct btrfs_root *root, u64 blocknr)
{
        struct buffer_head *bh = NULL;

        bh = btrfs_find_create_tree_block(root, blocknr);
        if (!bh)
                return bh;
        if (buffer_uptodate(bh))
                goto uptodate;
        lock_buffer(bh);
        if (!buffer_uptodate(bh)) {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        goto fail;
                csum_tree_block(root, bh, 1);
        } else {
                unlock_buffer(bh);
        }
uptodate:
        if (check_tree_block(root, bh))
                BUG();
        return bh;
fail:
        brelse(bh);
        return NULL;
}

int dirty_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct buffer_head *buf)
{
        WARN_ON(atomic_read(&buf->b_count) == 0);
        mark_buffer_dirty(buf);
        return 0;
}

int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct buffer_head *buf)
{
        WARN_ON(atomic_read(&buf->b_count) == 0);
        clear_buffer_dirty(buf);
        return 0;
}
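
/*
 * Initialize the in-memory fields of a btrfs_root.  find_and_setup_root()
 * additionally locates the root item in the tree of tree roots and reads
 * the root's top tree block.
 */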
static int __setup_root(int blocksize,
                        struct btrfs_root *root,
                        struct btrfs_fs_info *fs_info,
                        u64 objectid)
{
        root->node = NULL;
        root->inode = NULL;
        root->commit_root = NULL;
        root->blocksize = blocksize;
        root->ref_cows = 0;
        root->fs_info = fs_info;
        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_inode = 0;
        root->last_inode_alloc = 0;
        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        root->root_key.objectid = objectid;
        return 0;
}

static int find_and_setup_root(int blocksize,
                               struct btrfs_root *tree_root,
                               struct btrfs_fs_info *fs_info,
                               u64 objectid,
                               struct btrfs_root *root)
{
        int ret;

        __setup_root(blocksize, root, fs_info, objectid);
        ret = btrfs_find_last_root(tree_root, objectid,
                                   &root->root_item, &root->root_key);
        BUG_ON(ret);

        root->node = read_tree_block(root,
                                     btrfs_root_blocknr(&root->root_item));
        BUG_ON(!root->node);
        return 0;
}
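
/*
 * Look up a tree root by key, consulting the fs_roots_radix cache
 * first.  On a miss the root item is read from the tree of tree roots,
 * the root's top block is read, and the new root is inserted into the
 * cache before it is returned.
 */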
struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
                                      struct btrfs_key *location)
{
        struct btrfs_root *root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_path *path;
        struct btrfs_leaf *l;
        u64 highest_inode;
        int ret = 0;

        printk("read_fs_root looking for %Lu %Lu %u\n", location->objectid,
               location->offset, location->flags);
        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)location->objectid);
        if (root) {
                printk("found %p in cache\n", root);
                return root;
        }
        root = kmalloc(sizeof(*root), GFP_NOFS);
        if (!root) {
                printk("failed1\n");
                return ERR_PTR(-ENOMEM);
        }
        if (location->offset == (u64)-1) {
                ret = find_and_setup_root(fs_info->sb->s_blocksize,
                                          fs_info->tree_root, fs_info,
                                          location->objectid, root);
                if (ret) {
                        printk("failed2\n");
                        kfree(root);
                        return ERR_PTR(ret);
                }
                goto insert;
        }
        __setup_root(fs_info->sb->s_blocksize, root, fs_info,
                     location->objectid);
        path = btrfs_alloc_path();
        BUG_ON(!path);
        ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
        if (ret != 0) {
                printk("internal search_slot gives us %d\n", ret);
                if (ret > 0)
                        ret = -ENOENT;
                goto out;
        }
        l = btrfs_buffer_leaf(path->nodes[0]);
        memcpy(&root->root_item,
               btrfs_item_ptr(l, path->slots[0], struct btrfs_root_item),
               sizeof(root->root_item));
        memcpy(&root->root_key, location, sizeof(*location));
        ret = 0;
out:
        btrfs_release_path(root, path);
        btrfs_free_path(path);
        if (ret) {
                kfree(root);
                return ERR_PTR(ret);
        }
        root->node = read_tree_block(root,
                                     btrfs_root_blocknr(&root->root_item));
        BUG_ON(!root->node);
insert:
        printk("inserting %p\n", root);
        root->ref_cows = 1;
        ret = radix_tree_insert(&fs_info->fs_roots_radix,
                                (unsigned long)root->root_key.objectid,
                                root);
        if (ret) {
                printk("radix_tree_insert gives us %d\n", ret);
                brelse(root->node);
                kfree(root);
                return ERR_PTR(ret);
        }
        ret = btrfs_find_highest_inode(root, &highest_inode);
        if (ret == 0) {
                root->highest_inode = highest_inode;
                root->last_inode_alloc = highest_inode;
                printk("highest inode is %Lu\n", highest_inode);
        }
        printk("all worked\n");
        return root;
}
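
/*
 * Open one extra backing device by path.  The name stored after the
 * device item is not NUL-terminated, so a terminated copy is built
 * before calling open_bdev_excl(), and the device's block range is
 * then registered in the dev_radix.
 */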
static int btrfs_open_disk(struct btrfs_root *root, u64 device_id,
                           u64 block_start, u64 num_blocks,
                           char *filename, int name_len)
{
        char *null_filename;
        struct block_device *bdev;
        int ret;

        null_filename = kmalloc(name_len + 1, GFP_NOFS);
        if (!null_filename)
                return -ENOMEM;
        memcpy(null_filename, filename, name_len);
        null_filename[name_len] = '\0';

        bdev = open_bdev_excl(null_filename, O_RDWR, root->fs_info->sb);
        if (IS_ERR(bdev)) {
                ret = PTR_ERR(bdev);
                goto out;
        }
        set_blocksize(bdev, root->fs_info->sb->s_blocksize);
        ret = btrfs_insert_dev_radix(root, bdev, device_id,
                                     block_start, num_blocks);
        BUG_ON(ret);
        ret = 0;
out:
        kfree(null_filename);
        return ret;
}
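
/*
 * Walk every device item in the device tree and open the device it
 * describes, skipping the device that holds the super block since it
 * is already open.
 */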
static int read_device_info(struct btrfs_root *root)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct btrfs_leaf *leaf;
        struct btrfs_device_item *dev_item;
        int nritems;
        int slot;

        root = root->fs_info->dev_root;
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        key.objectid = 0;
        key.offset = 0;
        key.flags = 0;
        btrfs_set_key_type(&key, BTRFS_DEV_ITEM_KEY);

        mutex_lock(&root->fs_info->fs_mutex);
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        leaf = btrfs_buffer_leaf(path->nodes[0]);
        nritems = btrfs_header_nritems(&leaf->header);
        while (1) {
                slot = path->slots[0];
                if (slot >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret)
                                break;
                        leaf = btrfs_buffer_leaf(path->nodes[0]);
                        nritems = btrfs_header_nritems(&leaf->header);
                        slot = path->slots[0];
                }
                btrfs_disk_key_to_cpu(&key, &leaf->items[slot].key);
                if (btrfs_key_type(&key) != BTRFS_DEV_ITEM_KEY) {
                        path->slots[0]++;
                        continue;
                }
                dev_item = btrfs_item_ptr(leaf, slot, struct btrfs_device_item);
                printk("found key %Lu %Lu\n", key.objectid, key.offset);
                if (btrfs_device_id(dev_item) !=
                    btrfs_super_device_id(root->fs_info->disk_super)) {
                        ret = btrfs_open_disk(root, btrfs_device_id(dev_item),
                                              key.objectid, key.offset,
                                              (char *)(dev_item + 1),
                                              btrfs_device_pathlen(dev_item));
                        BUG_ON(ret);
                }
                path->slots[0]++;
        }
        btrfs_free_path(path);
        mutex_unlock(&root->fs_info->fs_mutex);
        return 0;
}
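
/*
 * Mount-time setup.  A temporary dev_radix entry spanning the whole
 * disk is inserted so the super block can be read before the real
 * device items are available; once the super block has been read it is
 * replaced with the range recorded there.  The tree root, device tree,
 * and extent tree are then loaded.
 */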
struct btrfs_root *open_ctree(struct super_block *sb)
{
        struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
                                                 GFP_NOFS);
        struct btrfs_root *dev_root = kmalloc(sizeof(struct btrfs_root),
                                              GFP_NOFS);
        struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
                                               GFP_NOFS);
        struct btrfs_fs_info *fs_info = kmalloc(sizeof(*fs_info),
                                                GFP_NOFS);
        int ret;
        struct btrfs_super_block *disk_super;
        struct dev_lookup *dev_lookup;

        init_bit_radix(&fs_info->pinned_radix);
        init_bit_radix(&fs_info->pending_del_radix);
        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
        INIT_RADIX_TREE(&fs_info->dev_radix, GFP_NOFS);
        INIT_RADIX_TREE(&fs_info->block_group_radix, GFP_KERNEL);
        INIT_LIST_HEAD(&fs_info->trans_list);
        sb_set_blocksize(sb, 4096);
        fs_info->running_transaction = NULL;
        fs_info->tree_root = tree_root;
        fs_info->extent_root = extent_root;
        fs_info->dev_root = dev_root;
        fs_info->sb = sb;
        fs_info->btree_inode = new_inode(sb);
        fs_info->btree_inode->i_ino = 1;
        fs_info->btree_inode->i_nlink = 1;
        fs_info->btree_inode->i_size = sb->s_bdev->bd_inode->i_size;
        fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
        fs_info->do_barriers = 1;
        fs_info->extent_tree_insert_nr = 0;
        fs_info->extent_tree_prealloc_nr = 0;
        BTRFS_I(fs_info->btree_inode)->root = tree_root;
        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
               sizeof(struct btrfs_key));
        insert_inode_hash(fs_info->btree_inode);
        mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);

        fs_info->hash_tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
        spin_lock_init(&fs_info->hash_lock);
        if (!fs_info->hash_tfm || IS_ERR(fs_info->hash_tfm)) {
                printk("failed to allocate sha256 hash\n");
                return NULL;
        }
        mutex_init(&fs_info->trans_mutex);
        mutex_init(&fs_info->fs_mutex);
        fs_info->block_group_cache = NULL;

        __setup_root(sb->s_blocksize, dev_root,
                     fs_info, BTRFS_DEV_TREE_OBJECTID);
        __setup_root(sb->s_blocksize, tree_root,
                     fs_info, BTRFS_ROOT_TREE_OBJECTID);

        dev_lookup = kmalloc(sizeof(*dev_lookup), GFP_NOFS);
        dev_lookup->block_start = 0;
        dev_lookup->num_blocks = (u32)-2;
        dev_lookup->bdev = sb->s_bdev;
        dev_lookup->device_id = 0;
        ret = radix_tree_insert(&fs_info->dev_radix, (u32)-2, dev_lookup);
        BUG_ON(ret);

        fs_info->sb_buffer = read_tree_block(tree_root,
                                             BTRFS_SUPER_INFO_OFFSET /
                                             sb->s_blocksize);
        if (!fs_info->sb_buffer)
                return NULL;
        disk_super = (struct btrfs_super_block *)fs_info->sb_buffer->b_data;
        if (!btrfs_super_root(disk_super))
                return NULL;

        i_size_write(fs_info->btree_inode,
                     btrfs_super_total_blocks(disk_super) <<
                     fs_info->btree_inode->i_blkbits);

        radix_tree_delete(&fs_info->dev_radix, (u32)-2);
        dev_lookup->block_start = btrfs_super_device_block_start(disk_super);
        dev_lookup->num_blocks = btrfs_super_device_num_blocks(disk_super);
        dev_lookup->device_id = btrfs_super_device_id(disk_super);

        ret = radix_tree_insert(&fs_info->dev_radix,
                                dev_lookup->block_start +
                                dev_lookup->num_blocks - 1, dev_lookup);
        BUG_ON(ret);

        fs_info->disk_super = disk_super;
        dev_root->node = read_tree_block(tree_root,
                                         btrfs_super_device_root(disk_super));
        ret = read_device_info(dev_root);
        BUG_ON(ret);

        tree_root->node = read_tree_block(tree_root,
                                          btrfs_super_root(disk_super));
        BUG_ON(!tree_root->node);

        mutex_lock(&fs_info->fs_mutex);
        ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
                                  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
        BUG_ON(ret);

        btrfs_read_block_groups(extent_root);

        fs_info->generation = btrfs_super_generation(disk_super) + 1;
        memset(&fs_info->kobj, 0, sizeof(fs_info->kobj));
        kobj_set_kset_s(fs_info, btrfs_subsys);
        kobject_set_name(&fs_info->kobj, "%s", sb->s_id);
        kobject_register(&fs_info->kobj);
        mutex_unlock(&fs_info->fs_mutex);
        return tree_root;
}
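
/*
 * Write the super block back to disk after recording the current tree
 * root block.  A barrier write is attempted first; if the device does
 * not support barriers the write is retried without one and barriers
 * are disabled for the rest of the mount.
 */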
int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
        int ret;
        struct buffer_head *bh = root->fs_info->sb_buffer;

        btrfs_set_super_root(root->fs_info->disk_super,
                             bh_blocknr(root->fs_info->tree_root->node));
        lock_buffer(bh);
        WARN_ON(atomic_read(&bh->b_count) < 1);
        clear_buffer_dirty(bh);
        csum_tree_block(root, bh, 0);
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
        if (root->fs_info->do_barriers)
                ret = submit_bh(WRITE_BARRIER, bh);
        else
                ret = submit_bh(WRITE, bh);
        if (ret == -EOPNOTSUPP) {
                set_buffer_uptodate(bh);
                root->fs_info->do_barriers = 0;
                ret = submit_bh(WRITE, bh);
        }
        wait_on_buffer(bh);
        if (!buffer_uptodate(bh)) {
                WARN_ON(1);
                return -EIO;
        }
        return 0;
}

static int free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
        radix_tree_delete(&fs_info->fs_roots_radix,
                          (unsigned long)root->root_key.objectid);
        if (root->inode)
                iput(root->inode);
        if (root->node)
                brelse(root->node);
        if (root->commit_root)
                brelse(root->commit_root);
        kfree(root);
        return 0;
}

int del_fs_roots(struct btrfs_fs_info *fs_info)
{
        int ret;
        struct btrfs_root *gang[8];
        int i;

        while (1) {
                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
                                             (void **)gang, 0,
                                             ARRAY_SIZE(gang));
                if (!ret)
                        break;
                for (i = 0; i < ret; i++)
                        free_fs_root(fs_info, gang[i]);
        }
        return 0;
}
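
/*
 * Tear down the dev_radix, closing every extra device that was opened
 * at mount time.  The device backing the super block is left for the
 * VFS to close.
 */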
static int free_dev_radix(struct btrfs_fs_info *fs_info)
{
        struct dev_lookup *lookup[8];
        struct block_device *super_bdev = fs_info->sb->s_bdev;
        int ret;
        int i;

        while (1) {
                ret = radix_tree_gang_lookup(&fs_info->dev_radix,
                                             (void **)lookup, 0,
                                             ARRAY_SIZE(lookup));
                if (!ret)
                        break;
                for (i = 0; i < ret; i++) {
                        if (lookup[i]->bdev != super_bdev)
                                close_bdev_excl(lookup[i]->bdev);
                        radix_tree_delete(&fs_info->dev_radix,
                                          lookup[i]->block_start +
                                          lookup[i]->num_blocks - 1);
                        kfree(lookup[i]);
                }
        }
        return 0;
}
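
/*
 * Unmount: commit the running transaction twice (the second commit
 * drops the snapshot taken by the first), write the super block, and
 * release every cached root, the btree inode, and the device list.
 */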
int close_ctree(struct btrfs_root *root)
{
        int ret;
        struct btrfs_trans_handle *trans;
        struct btrfs_fs_info *fs_info = root->fs_info;

        mutex_lock(&fs_info->fs_mutex);
        trans = btrfs_start_transaction(root, 1);
        btrfs_commit_transaction(trans, root);
        /* run commit again to drop the original snapshot */
        trans = btrfs_start_transaction(root, 1);
        btrfs_commit_transaction(trans, root);
        ret = btrfs_write_and_wait_transaction(NULL, root);
        BUG_ON(ret);
        write_ctree_super(NULL, root);
        mutex_unlock(&fs_info->fs_mutex);

        if (fs_info->extent_root->node)
                btrfs_block_release(fs_info->extent_root,
                                    fs_info->extent_root->node);
        if (fs_info->dev_root->node)
                btrfs_block_release(fs_info->dev_root,
                                    fs_info->dev_root->node);
        if (fs_info->tree_root->node)
                btrfs_block_release(fs_info->tree_root,
                                    fs_info->tree_root->node);
        btrfs_block_release(root, fs_info->sb_buffer);
        crypto_free_hash(fs_info->hash_tfm);
        truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
        iput(fs_info->btree_inode);
        free_dev_radix(fs_info);
        btrfs_free_block_groups(root->fs_info);
        del_fs_roots(fs_info);
        kfree(fs_info->extent_root);
        kfree(fs_info->tree_root);
        kobject_unregister(&fs_info->kobj);
        return 0;
}

void btrfs_block_release(struct btrfs_root *root, struct buffer_head *buf)
{
        brelse(buf);
}