disk-io.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795
  1. #include <linux/module.h>
  2. #include <linux/fs.h>
  3. #include <linux/blkdev.h>
  4. #include <linux/crypto.h>
  5. #include <linux/scatterlist.h>
  6. #include <linux/swap.h>
  7. #include <linux/radix-tree.h>
  8. #include <linux/writeback.h>
  9. #include "ctree.h"
  10. #include "disk-io.h"
  11. #include "transaction.h"
  12. #include "btrfs_inode.h"
/*
 * Maps a contiguous range of logical blocks to the block device that
 * backs them.  Entries live in fs_info->dev_radix, keyed by the last
 * block of the range (block_start + num_blocks - 1), so a gang lookup
 * starting at any block inside the range finds its entry first.
 */
struct dev_lookup {
	u64 block_start;		/* first logical block covered */
	u64 num_blocks;			/* length of the range in blocks */
	u64 device_id;			/* id from the on-disk device item */
	struct block_device *bdev;	/* open device backing the range */
};
  19. int btrfs_insert_dev_radix(struct btrfs_root *root,
  20. struct block_device *bdev,
  21. u64 device_id,
  22. u64 block_start,
  23. u64 num_blocks)
  24. {
  25. struct dev_lookup *lookup;
  26. int ret;
  27. lookup = kmalloc(sizeof(*lookup), GFP_NOFS);
  28. if (!lookup)
  29. return -ENOMEM;
  30. lookup->block_start = block_start;
  31. lookup->num_blocks = num_blocks;
  32. lookup->bdev = bdev;
  33. lookup->device_id = device_id;
  34. ret = radix_tree_insert(&root->fs_info->dev_radix, block_start +
  35. num_blocks - 1, lookup);
  36. return ret;
  37. }
  38. u64 bh_blocknr(struct buffer_head *bh)
  39. {
  40. int blkbits = bh->b_page->mapping->host->i_blkbits;
  41. u64 blocknr = bh->b_page->index << (PAGE_CACHE_SHIFT - blkbits);
  42. unsigned long offset;
  43. if (PageHighMem(bh->b_page))
  44. offset = (unsigned long)bh->b_data;
  45. else
  46. offset = bh->b_data - (char *)page_address(bh->b_page);
  47. blocknr += offset >> (PAGE_CACHE_SHIFT - blkbits);
  48. return blocknr;
  49. }
  50. static int check_tree_block(struct btrfs_root *root, struct buffer_head *buf)
  51. {
  52. struct btrfs_node *node = btrfs_buffer_node(buf);
  53. if (bh_blocknr(buf) != btrfs_header_blocknr(&node->header)) {
  54. printk(KERN_CRIT "bh_blocknr(buf) is %Lu, header is %Lu\n",
  55. bh_blocknr(buf), btrfs_header_blocknr(&node->header));
  56. BUG();
  57. }
  58. return 0;
  59. }
/*
 * Look up a tree block in the btree inode's page cache without doing
 * any disk IO.
 *
 * Returns the buffer_head with an extra reference held, or NULL if
 * the block is not cached.  The caller must brelse() the result.
 */
struct buffer_head *btrfs_find_tree_block(struct btrfs_root *root, u64 blocknr)
{
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	int blockbits = root->fs_info->sb->s_blocksize_bits;
	unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
	struct page *page;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct buffer_head *ret = NULL;

	/* hold the page lock so the buffer ring can't change under us */
	page = find_lock_page(mapping, index);
	if (!page)
		return NULL;
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		/* walk the circular buffer list looking for our block */
		if (buffer_mapped(bh) && bh_blocknr(bh) == blocknr) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);
out_unlock:
	unlock_page(page);
	page_cache_release(page);
	return ret;
}
/*
 * Fill in bh->b_bdev and bh->b_blocknr with the device location that
 * backs logical block @logical.
 *
 * dev_radix entries are keyed by the last block of the range they
 * cover, so a gang lookup starting at @logical returns first the
 * range ending at or after it; we still have to check that the range
 * actually contains @logical.
 *
 * A @logical of 0 is mapped to a NULL device at block 0 but still
 * marked mapped.  Returns 0 on success, -ENOENT when no device range
 * covers @logical.
 */
int btrfs_map_bh_to_logical(struct btrfs_root *root, struct buffer_head *bh,
			    u64 logical)
{
	struct dev_lookup *lookup[2];
	int ret;

	if (logical == 0) {
		bh->b_bdev = NULL;
		bh->b_blocknr = 0;
		set_buffer_mapped(bh);
		return 0;
	}
	/* the device ranges live in the dev tree's fs_info radix */
	root = root->fs_info->dev_root;
	ret = radix_tree_gang_lookup(&root->fs_info->dev_radix,
				     (void **)lookup,
				     (unsigned long)logical,
				     ARRAY_SIZE(lookup));
	if (ret == 0 || lookup[0]->block_start > logical ||
	    lookup[0]->block_start + lookup[0]->num_blocks <= logical) {
		ret = -ENOENT;
		goto out;
	}
	/* block number on the device is relative to the range start */
	bh->b_bdev = lookup[0]->bdev;
	bh->b_blocknr = logical - lookup[0]->block_start;
	set_buffer_mapped(bh);
	ret = 0;
out:
	return ret;
}
/*
 * Find a tree block in the btree page cache, creating the page and
 * its buffers if necessary.
 *
 * first_block is the logical block of the first buffer on the page
 * and is advanced as we walk the buffer ring, so every unmapped
 * buffer we pass gets mapped to its device location on the way.
 *
 * Returns the buffer_head with an extra reference held (and touched
 * for the LRU), or NULL when the page can't be allocated.  The caller
 * must brelse() the result.
 */
struct buffer_head *btrfs_find_create_tree_block(struct btrfs_root *root,
						 u64 blocknr)
{
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	int blockbits = root->fs_info->sb->s_blocksize_bits;
	unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
	struct page *page;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct buffer_head *ret = NULL;
	int err;
	/* NOTE(review): index is unsigned long; this shift may overflow
	 * on 32 bit before widening to u64 -- same pattern as
	 * bh_blocknr, harmless while blocksize == page size */
	u64 first_block = index << (PAGE_CACHE_SHIFT - blockbits);

	/* returns the page locked */
	page = grab_cache_page(mapping, index);
	if (!page)
		return NULL;
	if (!page_has_buffers(page))
		create_empty_buffers(page, root->fs_info->sb->s_blocksize, 0);
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh)) {
			err = btrfs_map_bh_to_logical(root, bh, first_block);
			BUG_ON(err);
		}
		if (bh_blocknr(bh) == blocknr) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
		first_block++;
	} while (bh != head);
out_unlock:
	unlock_page(page);
	if (ret)
		touch_buffer(ret);
	page_cache_release(page);
	return ret;
}
  156. static int btree_get_block(struct inode *inode, sector_t iblock,
  157. struct buffer_head *bh, int create)
  158. {
  159. int err;
  160. struct btrfs_root *root = BTRFS_I(bh->b_page->mapping->host)->root;
  161. err = btrfs_map_bh_to_logical(root, bh, iblock);
  162. return err;
  163. }
  164. int btrfs_csum_data(struct btrfs_root * root, char *data, size_t len,
  165. char *result)
  166. {
  167. struct scatterlist sg;
  168. struct crypto_hash *tfm = root->fs_info->hash_tfm;
  169. struct hash_desc desc;
  170. int ret;
  171. desc.tfm = tfm;
  172. desc.flags = 0;
  173. sg_init_one(&sg, data, len);
  174. spin_lock(&root->fs_info->hash_lock);
  175. ret = crypto_hash_digest(&desc, &sg, 1, result);
  176. spin_unlock(&root->fs_info->hash_lock);
  177. if (ret) {
  178. printk("digest failed\n");
  179. }
  180. return ret;
  181. }
/*
 * Compute or verify the crc of a tree block.
 *
 * The crc covers everything after the BTRFS_CSUM_SIZE bytes reserved
 * at the front of the block.  With @verify set, compare against the
 * checksum stored at the front and return 1 on mismatch (after a
 * printk); otherwise store the freshly computed checksum into the
 * node header.  Returns the error from btrfs_csum_data if hashing
 * itself fails, 0 on success.
 */
static int csum_tree_block(struct btrfs_root *root, struct buffer_head *bh,
			   int verify)
{
	char result[BTRFS_CRC32_SIZE];
	int ret;
	struct btrfs_node *node;

	/* skip the csum bytes at the front; they aren't covered */
	ret = btrfs_csum_data(root, bh->b_data + BTRFS_CSUM_SIZE,
			      bh->b_size - BTRFS_CSUM_SIZE, result);
	if (ret)
		return ret;
	if (verify) {
		if (memcmp(bh->b_data, result, BTRFS_CRC32_SIZE)) {
			printk("checksum verify failed on %Lu\n",
			       bh_blocknr(bh));
			return 1;
		}
	} else {
		node = btrfs_buffer_node(bh);
		memcpy(node->header.csum, result, BTRFS_CRC32_SIZE);
	}
	return 0;
}
/*
 * writepage for the btree inode.  Before handing the page to the
 * generic block writeback path, recompute the checksum of every dirty
 * buffer so the data that reaches disk is self-consistent.
 */
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct buffer_head *head;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, root->fs_info->sb->s_blocksize,
				     (1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	head = page_buffers(page);
	bh = head;
	do {
		/* refresh the csum of each dirty buffer before IO */
		if (buffer_dirty(bh))
			csum_tree_block(root, bh, 0);
		bh = bh->b_this_page;
	} while (bh != head);
	return block_write_full_page(page, btree_get_block, wbc);
}
/* readpage for the btree inode: a plain buffered read of one page */
static int btree_readpage(struct file * file, struct page * page)
{
	return block_read_full_page(page, btree_get_block);
}
/*
 * Address space operations for the btree inode.  Checksums are
 * applied in btree_writepage before blocks go out.
 */
static struct address_space_operations btree_aops = {
	.readpage = btree_readpage,
	.writepage = btree_writepage,
	.sync_page = block_sync_page,
};
/*
 * Kick off an async read of a tree block unless it is already cached.
 *
 * Returns 1 when the block was already up to date or its buffer was
 * locked (someone else is presumably reading it), 0 when a read was
 * submitted or no buffer could be created.  The IO is never waited
 * on.
 */
int readahead_tree_block(struct btrfs_root *root, u64 blocknr)
{
	struct buffer_head *bh = NULL;
	int ret = 0;

	bh = btrfs_find_create_tree_block(root, blocknr);
	if (!bh)
		return 0;
	if (buffer_uptodate(bh)) {
		ret = 1;
		goto done;
	}
	/* trylock: if someone else holds it, they are doing the IO */
	if (test_set_buffer_locked(bh)) {
		ret = 1;
		goto done;
	}
	if (!buffer_uptodate(bh)) {
		get_bh(bh);	/* ref dropped by end_buffer_read_sync */
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
	} else {
		/* became uptodate while we took the lock */
		unlock_buffer(bh);
		ret = 1;
	}
done:
	brelse(bh);
	return ret;
}
/*
 * Read a tree block, waiting for the IO, and verify it.
 *
 * The checksum is verified at most once per buffer; the result is
 * cached in the buffer's "checked" bit so repeated reads skip the crc
 * work.  check_tree_block() BUGs when the header blocknr does not
 * match the buffer's location.
 *
 * Returns the buffer with a reference held, or NULL on IO failure.
 *
 * NOTE(review): a checksum mismatch is only logged inside
 * csum_tree_block; its return value is ignored here.
 */
struct buffer_head *read_tree_block(struct btrfs_root *root, u64 blocknr)
{
	struct buffer_head *bh = NULL;

	bh = btrfs_find_create_tree_block(root, blocknr);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		goto uptodate;
	lock_buffer(bh);
	if (!buffer_uptodate(bh)) {
		get_bh(bh);	/* ref dropped by end_buffer_read_sync */
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto fail;
	} else {
		/* raced with another reader; nothing left to do */
		unlock_buffer(bh);
	}
uptodate:
	if (!buffer_checked(bh)) {
		csum_tree_block(root, bh, 1);
		set_buffer_checked(bh);
	}
	if (check_tree_block(root, bh))
		BUG();
	return bh;
fail:
	brelse(bh);
	return NULL;
}
/*
 * Mark a tree block dirty so the next writeback pass checksums and
 * writes it.  The WARN_ON catches callers who dropped their buffer
 * reference too early.
 */
int dirty_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		     struct buffer_head *buf)
{
	WARN_ON(atomic_read(&buf->b_count) == 0);
	mark_buffer_dirty(buf);
	return 0;
}
/*
 * Clear a tree block's dirty bit so writeback skips it.  As with
 * dirty_tree_block, the caller must still hold a buffer reference.
 */
int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		     struct buffer_head *buf)
{
	WARN_ON(atomic_read(&buf->b_count) == 0);
	clear_buffer_dirty(buf);
	return 0;
}
  303. static int __setup_root(int blocksize,
  304. struct btrfs_root *root,
  305. struct btrfs_fs_info *fs_info,
  306. u64 objectid)
  307. {
  308. root->node = NULL;
  309. root->inode = NULL;
  310. root->commit_root = NULL;
  311. root->blocksize = blocksize;
  312. root->ref_cows = 0;
  313. root->fs_info = fs_info;
  314. root->objectid = objectid;
  315. root->last_trans = 0;
  316. root->highest_inode = 0;
  317. root->last_inode_alloc = 0;
  318. memset(&root->root_key, 0, sizeof(root->root_key));
  319. memset(&root->root_item, 0, sizeof(root->root_item));
  320. root->root_key.objectid = objectid;
  321. return 0;
  322. }
/*
 * Initialize @root for @objectid and read its tree node from the
 * location recorded in @tree_root.  BUGs if the root item or its
 * block can't be found, so this is only for roots that must exist.
 * Always returns 0 on the paths that return at all.
 */
static int find_and_setup_root(int blocksize,
			       struct btrfs_root *tree_root,
			       struct btrfs_fs_info *fs_info,
			       u64 objectid,
			       struct btrfs_root *root)
{
	int ret;

	__setup_root(blocksize, root, fs_info, objectid);
	ret = btrfs_find_last_root(tree_root, objectid,
				   &root->root_item, &root->root_key);
	BUG_ON(ret);
	root->node = read_tree_block(root,
				     btrfs_root_blocknr(&root->root_item));
	BUG_ON(!root->node);
	return 0;
}
/*
 * Look up the in-memory root for @location, loading it from disk on
 * a cache miss.
 *
 * Roots are cached in fs_roots_radix keyed by objectid.  A location
 * offset of (u64)-1 means "use the latest root item" and goes through
 * find_and_setup_root; any other offset is looked up directly in the
 * tree root.  Freshly loaded roots get ref_cows = 1 and are inserted
 * into the cache, then their highest inode number is recorded.
 *
 * Returns the root, or an ERR_PTR on failure.  The unconditional
 * printk calls throughout look like debugging leftovers.
 */
struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path;
	struct btrfs_leaf *l;
	u64 highest_inode;
	int ret = 0;

	printk("read_fs_root looking for %Lu %Lu %u\n", location->objectid, location->offset, location->flags);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)location->objectid);
	if (root) {
		printk("found %p in cache\n", root);
		return root;
	}
	root = kmalloc(sizeof(*root), GFP_NOFS);
	if (!root) {
		printk("failed1\n");
		return ERR_PTR(-ENOMEM);
	}
	if (location->offset == (u64)-1) {
		/* -1 offset: take whatever the root tree says is current */
		ret = find_and_setup_root(fs_info->sb->s_blocksize,
					  fs_info->tree_root, fs_info,
					  location->objectid, root);
		if (ret) {
			printk("failed2\n");
			kfree(root);
			return ERR_PTR(ret);
		}
		goto insert;
	}
	__setup_root(fs_info->sb->s_blocksize, root, fs_info,
		     location->objectid);
	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
	if (ret != 0) {
		printk("internal search_slot gives us %d\n", ret);
		if (ret > 0)
			ret = -ENOENT;	/* not found, as opposed to IO error */
		goto out;
	}
	/* copy the root item out of the leaf before dropping the path */
	l = btrfs_buffer_leaf(path->nodes[0]);
	memcpy(&root->root_item,
	       btrfs_item_ptr(l, path->slots[0], struct btrfs_root_item),
	       sizeof(root->root_item));
	memcpy(&root->root_key, location, sizeof(*location));
	ret = 0;
out:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	if (ret) {
		kfree(root);
		return ERR_PTR(ret);
	}
	root->node = read_tree_block(root,
				     btrfs_root_blocknr(&root->root_item));
	BUG_ON(!root->node);
insert:
	printk("inserting %p\n", root);
	root->ref_cows = 1;
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret) {
		printk("radix_tree_insert gives us %d\n", ret);
		brelse(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}
	/* best effort; a failure here leaves the counters at zero */
	ret = btrfs_find_highest_inode(root, &highest_inode);
	if (ret == 0) {
		root->highest_inode = highest_inode;
		root->last_inode_alloc = highest_inode;
		printk("highest inode is %Lu\n", highest_inode);
	}
	printk("all worked\n");
	return root;
}
/*
 * Open an additional backing device by pathname and register the
 * block range it covers in the dev_radix.
 *
 * @filename is not NUL terminated (it comes straight out of a device
 * item), so a terminated copy is made for open_bdev_excl.
 *
 * Returns 0 on success, -ENOMEM, or the error from open_bdev_excl.
 */
static int btrfs_open_disk(struct btrfs_root *root, u64 device_id,
			   u64 block_start, u64 num_blocks,
			   char *filename, int name_len)
{
	char *null_filename;
	struct block_device *bdev;
	int ret;

	null_filename = kmalloc(name_len + 1, GFP_NOFS);
	if (!null_filename)
		return -ENOMEM;
	memcpy(null_filename, filename, name_len);
	null_filename[name_len] = '\0';

	bdev = open_bdev_excl(null_filename, O_RDWR, root->fs_info->sb);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto out;
	}
	/* NOTE(review): set_blocksize can fail but its return value is
	 * ignored here -- worth checking */
	set_blocksize(bdev, root->fs_info->sb->s_blocksize);
	ret = btrfs_insert_dev_radix(root, bdev, device_id,
				     block_start, num_blocks);
	BUG_ON(ret);
	ret = 0;
out:
	kfree(null_filename);
	return ret;
}
  445. static int read_device_info(struct btrfs_root *root)
  446. {
  447. struct btrfs_path *path;
  448. int ret;
  449. struct btrfs_key key;
  450. struct btrfs_leaf *leaf;
  451. struct btrfs_device_item *dev_item;
  452. int nritems;
  453. int slot;
  454. root = root->fs_info->dev_root;
  455. path = btrfs_alloc_path();
  456. if (!path)
  457. return -ENOMEM;
  458. key.objectid = 0;
  459. key.offset = 0;
  460. key.flags = 0;
  461. btrfs_set_key_type(&key, BTRFS_DEV_ITEM_KEY);
  462. mutex_lock(&root->fs_info->fs_mutex);
  463. ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
  464. leaf = btrfs_buffer_leaf(path->nodes[0]);
  465. nritems = btrfs_header_nritems(&leaf->header);
  466. while(1) {
  467. slot = path->slots[0];
  468. if (slot >= nritems) {
  469. ret = btrfs_next_leaf(root, path);
  470. if (ret)
  471. break;
  472. leaf = btrfs_buffer_leaf(path->nodes[0]);
  473. nritems = btrfs_header_nritems(&leaf->header);
  474. slot = path->slots[0];
  475. }
  476. btrfs_disk_key_to_cpu(&key, &leaf->items[slot].key);
  477. if (btrfs_key_type(&key) != BTRFS_DEV_ITEM_KEY) {
  478. path->slots[0]++;
  479. continue;
  480. }
  481. dev_item = btrfs_item_ptr(leaf, slot, struct btrfs_device_item);
  482. printk("found key %Lu %Lu\n", key.objectid, key.offset);
  483. if (btrfs_device_id(dev_item) !=
  484. btrfs_super_device_id(root->fs_info->disk_super)) {
  485. ret = btrfs_open_disk(root, btrfs_device_id(dev_item),
  486. key.objectid, key.offset,
  487. (char *)(dev_item + 1),
  488. btrfs_device_pathlen(dev_item));
  489. BUG_ON(ret);
  490. }
  491. path->slots[0]++;
  492. }
  493. btrfs_free_path(path);
  494. mutex_unlock(&root->fs_info->fs_mutex);
  495. return 0;
  496. }
/*
 * Mount-time setup: allocate the in-memory roots and fs_info, read
 * the super block, populate the device radix and load the device,
 * tree and extent roots.  Returns the tree root, or NULL on failure.
 *
 * NOTE(review): none of the kmalloc/new_inode results are checked
 * before use, and each early NULL return leaks everything allocated
 * so far -- worth hardening.
 */
struct btrfs_root *open_ctree(struct super_block *sb)
{
	struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
						 GFP_NOFS);
	struct btrfs_root *dev_root = kmalloc(sizeof(struct btrfs_root),
					      GFP_NOFS);
	struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
					       GFP_NOFS);
	struct btrfs_fs_info *fs_info = kmalloc(sizeof(*fs_info),
						GFP_NOFS);
	int ret;
	struct btrfs_super_block *disk_super;
	struct dev_lookup *dev_lookup;

	init_bit_radix(&fs_info->pinned_radix);
	init_bit_radix(&fs_info->pending_del_radix);
	init_bit_radix(&fs_info->extent_map_radix);
	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
	INIT_RADIX_TREE(&fs_info->dev_radix, GFP_NOFS);
	INIT_RADIX_TREE(&fs_info->block_group_radix, GFP_KERNEL);
	INIT_RADIX_TREE(&fs_info->block_group_data_radix, GFP_KERNEL);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	sb_set_blocksize(sb, 4096);
	fs_info->running_transaction = NULL;
	fs_info->tree_root = tree_root;
	fs_info->extent_root = extent_root;
	fs_info->dev_root = dev_root;
	fs_info->sb = sb;
	/* private inode whose page cache holds every btree block */
	fs_info->btree_inode = new_inode(sb);
	fs_info->btree_inode->i_ino = 1;
	fs_info->btree_inode->i_nlink = 1;
	fs_info->btree_inode->i_size = sb->s_bdev->bd_inode->i_size;
	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
	fs_info->do_barriers = 1;
	fs_info->extent_tree_insert_nr = 0;
	fs_info->extent_tree_prealloc_nr = 0;
	fs_info->closing = 0;
	INIT_DELAYED_WORK(&fs_info->trans_work, btrfs_transaction_cleaner);
	BTRFS_I(fs_info->btree_inode)->root = tree_root;
	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
	       sizeof(struct btrfs_key));
	insert_inode_hash(fs_info->btree_inode);
	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
	fs_info->hash_tfm = crypto_alloc_hash("crc32c", 0, CRYPTO_ALG_ASYNC);
	spin_lock_init(&fs_info->hash_lock);
	if (!fs_info->hash_tfm || IS_ERR(fs_info->hash_tfm)) {
		printk("failed to allocate digest hash\n");
		return NULL;
	}
	mutex_init(&fs_info->trans_mutex);
	mutex_init(&fs_info->fs_mutex);
	__setup_root(sb->s_blocksize, dev_root,
		     fs_info, BTRFS_DEV_TREE_OBJECTID);
	__setup_root(sb->s_blocksize, tree_root,
		     fs_info, BTRFS_ROOT_TREE_OBJECTID);
	/*
	 * Bootstrap device entry spanning nearly the whole address
	 * space so the super block can be read before the real device
	 * ranges are known; it is re-keyed from the super below.
	 */
	dev_lookup = kmalloc(sizeof(*dev_lookup), GFP_NOFS);
	dev_lookup->block_start = 0;
	dev_lookup->num_blocks = (u32)-2;
	dev_lookup->bdev = sb->s_bdev;
	dev_lookup->device_id = 0;
	ret = radix_tree_insert(&fs_info->dev_radix, (u32)-2, dev_lookup);
	BUG_ON(ret);
	fs_info->sb_buffer = read_tree_block(tree_root,
					     BTRFS_SUPER_INFO_OFFSET /
					     sb->s_blocksize);
	if (!fs_info->sb_buffer)
		return NULL;
	disk_super = (struct btrfs_super_block *)fs_info->sb_buffer->b_data;
	if (!btrfs_super_root(disk_super))
		return NULL;
	i_size_write(fs_info->btree_inode,
		     btrfs_super_total_blocks(disk_super) <<
		     fs_info->btree_inode->i_blkbits);
	/* swap the bootstrap entry for the real range from the super */
	radix_tree_delete(&fs_info->dev_radix, (u32)-2);
	dev_lookup->block_start = btrfs_super_device_block_start(disk_super);
	dev_lookup->num_blocks = btrfs_super_device_num_blocks(disk_super);
	dev_lookup->device_id = btrfs_super_device_id(disk_super);
	ret = radix_tree_insert(&fs_info->dev_radix,
				dev_lookup->block_start +
				dev_lookup->num_blocks - 1, dev_lookup);
	BUG_ON(ret);
	fs_info->disk_super = disk_super;
	dev_root->node = read_tree_block(tree_root,
					 btrfs_super_device_root(disk_super));
	ret = read_device_info(dev_root);
	BUG_ON(ret);
	tree_root->node = read_tree_block(tree_root,
					  btrfs_super_root(disk_super));
	BUG_ON(!tree_root->node);
	mutex_lock(&fs_info->fs_mutex);
	ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
	BUG_ON(ret);
	btrfs_read_block_groups(extent_root);
	/* new transactions continue after the last one on disk */
	fs_info->generation = btrfs_super_generation(disk_super) + 1;
	mutex_unlock(&fs_info->fs_mutex);
	return tree_root;
}
/*
 * Write the super block buffer synchronously.
 *
 * The super's root pointer is updated to the current tree root node
 * before the write.  When barriers are enabled the write goes out as
 * WRITE_BARRIER; if the device reports -EOPNOTSUPP, barriers are
 * disabled for the rest of the mount and the write is retried as a
 * plain WRITE.  Returns 0 on success, -EIO if the buffer is not
 * uptodate after the write completes.
 */
int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root)
{
	int ret;
	struct buffer_head *bh = root->fs_info->sb_buffer;

	/* point the super at the current tree root */
	btrfs_set_super_root(root->fs_info->disk_super,
			     bh_blocknr(root->fs_info->tree_root->node));
	lock_buffer(bh);
	WARN_ON(atomic_read(&bh->b_count) < 1);
	clear_buffer_dirty(bh);
	csum_tree_block(root, bh, 0);
	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);	/* ref dropped by end_buffer_write_sync */
	if (root->fs_info->do_barriers)
		ret = submit_bh(WRITE_BARRIER, bh);
	else
		ret = submit_bh(WRITE, bh);
	if (ret == -EOPNOTSUPP) {
		/* NOTE(review): the retry resubmits without re-locking
		 * or re-taking a reference -- confirm the failed
		 * barrier submit left both intact */
		set_buffer_uptodate(bh);
		root->fs_info->do_barriers = 0;
		ret = submit_bh(WRITE, bh);
	}
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		WARN_ON(1);
		return -EIO;
	}
	return 0;
}
/*
 * Remove @root from the fs_roots radix and release everything it
 * pins: its inode, its current node and its commit root node, then
 * free the structure itself.  Always returns 0.
 */
static int free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	if (root->inode)
		iput(root->inode);
	if (root->node)
		brelse(root->node);
	if (root->commit_root)
		brelse(root->commit_root);
	kfree(root);
	return 0;
}
  637. static int del_fs_roots(struct btrfs_fs_info *fs_info)
  638. {
  639. int ret;
  640. struct btrfs_root *gang[8];
  641. int i;
  642. while(1) {
  643. ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
  644. (void **)gang, 0,
  645. ARRAY_SIZE(gang));
  646. if (!ret)
  647. break;
  648. for (i = 0; i < ret; i++)
  649. free_fs_root(fs_info, gang[i]);
  650. }
  651. return 0;
  652. }
  653. static int free_dev_radix(struct btrfs_fs_info *fs_info)
  654. {
  655. struct dev_lookup *lookup[8];
  656. struct block_device *super_bdev = fs_info->sb->s_bdev;
  657. int ret;
  658. int i;
  659. while(1) {
  660. ret = radix_tree_gang_lookup(&fs_info->dev_radix,
  661. (void **)lookup, 0,
  662. ARRAY_SIZE(lookup));
  663. if (!ret)
  664. break;
  665. for (i = 0; i < ret; i++) {
  666. if (lookup[i]->bdev != super_bdev)
  667. close_bdev_excl(lookup[i]->bdev);
  668. radix_tree_delete(&fs_info->dev_radix,
  669. lookup[i]->block_start +
  670. lookup[i]->num_blocks - 1);
  671. kfree(lookup[i]);
  672. }
  673. }
  674. return 0;
  675. }
/*
 * Tear down a mounted filesystem: commit outstanding transactions,
 * write the super block, drop every cached tree block and free the
 * in-memory state.  Always returns 0.
 */
int close_ctree(struct btrfs_root *root)
{
	int ret;
	struct btrfs_trans_handle *trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	/* stop the transaction cleaner from queueing more work */
	fs_info->closing = 1;
	btrfs_transaction_flush_work(root);
	mutex_lock(&fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	btrfs_commit_transaction(trans, root);
	/* run commit again to drop the original snapshot */
	trans = btrfs_start_transaction(root, 1);
	btrfs_commit_transaction(trans, root);
	ret = btrfs_write_and_wait_transaction(NULL, root);
	BUG_ON(ret);
	write_ctree_super(NULL, root);
	mutex_unlock(&fs_info->fs_mutex);

	/* drop the cached root nodes before tearing down the trees */
	if (fs_info->extent_root->node)
		btrfs_block_release(fs_info->extent_root,
				    fs_info->extent_root->node);
	if (fs_info->dev_root->node)
		btrfs_block_release(fs_info->dev_root,
				    fs_info->dev_root->node);
	if (fs_info->tree_root->node)
		btrfs_block_release(fs_info->tree_root,
				    fs_info->tree_root->node);
	btrfs_block_release(root, fs_info->sb_buffer);
	crypto_free_hash(fs_info->hash_tfm);
	truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
	iput(fs_info->btree_inode);
	free_dev_radix(fs_info);
	btrfs_free_block_groups(root->fs_info);
	del_fs_roots(fs_info);
	kfree(fs_info->extent_root);
	kfree(fs_info->tree_root);
	/* NOTE(review): fs_info->dev_root and fs_info itself, both
	 * kmalloc'd in open_ctree, are never freed -- confirm whether
	 * that's intentional */
	return 0;
}
/* Drop one reference on a tree block buffer. */
void btrfs_block_release(struct btrfs_root *root, struct buffer_head *buf)
{
	brelse(buf);
}
/*
 * Throttle the caller if the btree inode's mapping has accumulated
 * too many dirty pages, letting writeback catch up.
 */
void btrfs_btree_balance_dirty(struct btrfs_root *root)
{
	balance_dirty_pages_ratelimited(root->fs_info->btree_inode->i_mapping);
}