/* disk-io.c -- btree block I/O and tree setup/teardown for btrfs */
  1. #include <linux/module.h>
  2. #include <linux/fs.h>
  3. #include <linux/blkdev.h>
  4. #include <linux/crypto.h>
  5. #include <linux/scatterlist.h>
  6. #include <linux/swap.h>
  7. #include "ctree.h"
  8. #include "disk-io.h"
  9. #include "transaction.h"
  10. #define PATTERN 0xDEADBEEFUL
  11. static inline void check_pattern(struct buffer_head *buf)
  12. {
  13. if (buf->b_private != (void *)PATTERN)
  14. WARN_ON(1);
  15. }
  16. static inline void set_pattern(struct buffer_head *buf)
  17. {
  18. buf->b_private = (void *)PATTERN;
  19. }
  20. static int check_tree_block(struct btrfs_root *root, struct buffer_head *buf)
  21. {
  22. struct btrfs_node *node = btrfs_buffer_node(buf);
  23. if (buf->b_blocknr != btrfs_header_blocknr(&node->header)) {
  24. BUG();
  25. }
  26. if (root->node && btrfs_header_parentid(&node->header) !=
  27. btrfs_header_parentid(btrfs_buffer_header(root->node))) {
  28. BUG();
  29. }
  30. return 0;
  31. }
  32. struct buffer_head *btrfs_find_tree_block(struct btrfs_root *root, u64 blocknr)
  33. {
  34. struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
  35. int blockbits = root->fs_info->sb->s_blocksize_bits;
  36. unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
  37. struct page *page;
  38. struct buffer_head *bh;
  39. struct buffer_head *head;
  40. struct buffer_head *ret = NULL;
  41. page = find_lock_page(mapping, index);
  42. if (!page)
  43. return NULL;
  44. if (!page_has_buffers(page))
  45. goto out_unlock;
  46. head = page_buffers(page);
  47. bh = head;
  48. do {
  49. if (buffer_mapped(bh) && bh->b_blocknr == blocknr) {
  50. ret = bh;
  51. get_bh(bh);
  52. goto out_unlock;
  53. }
  54. bh = bh->b_this_page;
  55. } while (bh != head);
  56. out_unlock:
  57. unlock_page(page);
  58. if (ret) {
  59. touch_buffer(ret);
  60. check_pattern(ret);
  61. }
  62. page_cache_release(page);
  63. return ret;
  64. }
  65. struct buffer_head *btrfs_find_create_tree_block(struct btrfs_root *root,
  66. u64 blocknr)
  67. {
  68. struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
  69. int blockbits = root->fs_info->sb->s_blocksize_bits;
  70. unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
  71. struct page *page;
  72. struct buffer_head *bh;
  73. struct buffer_head *head;
  74. struct buffer_head *ret = NULL;
  75. u64 first_block = index << (PAGE_CACHE_SHIFT - blockbits);
  76. page = grab_cache_page(mapping, index);
  77. if (!page)
  78. return NULL;
  79. if (!page_has_buffers(page))
  80. create_empty_buffers(page, root->fs_info->sb->s_blocksize, 0);
  81. head = page_buffers(page);
  82. bh = head;
  83. do {
  84. if (!buffer_mapped(bh)) {
  85. bh->b_bdev = root->fs_info->sb->s_bdev;
  86. bh->b_blocknr = first_block;
  87. set_buffer_mapped(bh);
  88. set_pattern(bh);
  89. }
  90. if (bh->b_blocknr == blocknr) {
  91. ret = bh;
  92. get_bh(bh);
  93. goto out_unlock;
  94. }
  95. bh = bh->b_this_page;
  96. first_block++;
  97. } while (bh != head);
  98. out_unlock:
  99. unlock_page(page);
  100. if (ret)
  101. touch_buffer(ret);
  102. page_cache_release(page);
  103. return ret;
  104. }
  105. static sector_t max_block(struct block_device *bdev)
  106. {
  107. sector_t retval = ~((sector_t)0);
  108. loff_t sz = i_size_read(bdev->bd_inode);
  109. if (sz) {
  110. unsigned int size = block_size(bdev);
  111. unsigned int sizebits = blksize_bits(size);
  112. retval = (sz >> sizebits);
  113. }
  114. return retval;
  115. }
  116. static int btree_get_block(struct inode *inode, sector_t iblock,
  117. struct buffer_head *bh, int create)
  118. {
  119. if (iblock >= max_block(inode->i_sb->s_bdev)) {
  120. if (create)
  121. return -EIO;
  122. /*
  123. * for reads, we're just trying to fill a partial page.
  124. * return a hole, they will have to call get_block again
  125. * before they can fill it, and they will get -EIO at that
  126. * time
  127. */
  128. return 0;
  129. }
  130. bh->b_bdev = inode->i_sb->s_bdev;
  131. bh->b_blocknr = iblock;
  132. set_buffer_mapped(bh);
  133. return 0;
  134. }
  135. int btrfs_csum_data(struct btrfs_root * root, char *data, size_t len,
  136. char *result)
  137. {
  138. struct scatterlist sg;
  139. struct crypto_hash *tfm = root->fs_info->hash_tfm;
  140. struct hash_desc desc;
  141. int ret;
  142. desc.tfm = tfm;
  143. desc.flags = 0;
  144. sg_init_one(&sg, data, len);
  145. spin_lock(&root->fs_info->hash_lock);
  146. ret = crypto_hash_digest(&desc, &sg, 1, result);
  147. spin_unlock(&root->fs_info->hash_lock);
  148. if (ret) {
  149. printk("sha256 digest failed\n");
  150. }
  151. return ret;
  152. }
/*
 * Compute (@verify == 0) or verify (@verify != 0) the checksum stored
 * in the first BTRFS_CSUM_SIZE bytes of the tree block in @bh.
 *
 * NOTE(review): the unconditional "return 0" below short-circuits the
 * whole function -- checksumming is currently disabled and everything
 * after it is dead code, presumably kept for when it is switched back
 * on.  Callers therefore always see success.
 */
static int csum_tree_block(struct btrfs_root *root, struct buffer_head *bh,
			   int verify)
{
	char result[BTRFS_CSUM_SIZE];
	int ret;
	struct btrfs_node *node;

	return 0;	/* checksums disabled for now */
	ret = btrfs_csum_data(root, bh->b_data + BTRFS_CSUM_SIZE,
			      bh->b_size - BTRFS_CSUM_SIZE, result);
	if (ret)
		return ret;
	if (verify) {
		if (memcmp(bh->b_data, result, BTRFS_CSUM_SIZE)) {
			printk("checksum verify failed on %lu\n",
			       bh->b_blocknr);
			return 1;
		}
	} else {
		/* stash the fresh checksum into the on-disk header */
		node = btrfs_buffer_node(bh);
		memcpy(node->header.csum, result, BTRFS_CSUM_SIZE);
	}
	return 0;
}
/*
 * Write back a dirty btree page.  The #if 0 region used to checksum
 * each dirty buffer before submission; it is compiled out along with
 * checksumming in general (csum_tree_block returns early).
 */
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
#if 0
	struct buffer_head *bh;
	struct btrfs_root *root = btrfs_sb(page->mapping->host->i_sb);
	struct buffer_head *head;
	if (!page_has_buffers(page)) {
		create_empty_buffers(page, root->fs_info->sb->s_blocksize,
				     (1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	head = page_buffers(page);
	bh = head;
	do {
		if (buffer_dirty(bh))
			csum_tree_block(root, bh, 0);
		bh = bh->b_this_page;
	} while (bh != head);
#endif
	return block_write_full_page(page, btree_get_block, wbc);
}
  196. static int btree_readpage(struct file * file, struct page * page)
  197. {
  198. return block_read_full_page(page, btree_get_block);
  199. }
/* address_space operations for the btree inode's page cache */
static struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepage	= btree_writepage,
	.sync_page	= block_sync_page,
};
/*
 * Return the tree block at @blocknr, reading it from disk if it is not
 * already uptodate in the page cache.  The caller owns one reference
 * on the returned buffer (drop with btrfs_block_release).  Returns
 * NULL on I/O failure.
 */
struct buffer_head *read_tree_block(struct btrfs_root *root, u64 blocknr)
{
	struct buffer_head *bh = NULL;

	bh = btrfs_find_create_tree_block(root, blocknr);
	if (!bh)
		return bh;
	lock_buffer(bh);
	if (!buffer_uptodate(bh)) {
		/* extra ref for the in-flight I/O; end_buffer_read_sync
		 * drops it and unlocks the buffer on completion */
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto fail;
		/* currently a no-op -- see csum_tree_block; its return
		 * value is deliberately ignored here */
		csum_tree_block(root, bh, 1);
		set_pattern(bh);
	} else {
		unlock_buffer(bh);
	}
	if (check_tree_block(root, bh))
		BUG();
	return bh;
fail:
	brelse(bh);
	return NULL;
}
  231. int dirty_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
  232. struct buffer_head *buf)
  233. {
  234. WARN_ON(atomic_read(&buf->b_count) == 0);
  235. mark_buffer_dirty(buf);
  236. return 0;
  237. }
  238. int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
  239. struct buffer_head *buf)
  240. {
  241. WARN_ON(atomic_read(&buf->b_count) == 0);
  242. clear_buffer_dirty(buf);
  243. return 0;
  244. }
  245. static int __setup_root(struct btrfs_super_block *super,
  246. struct btrfs_root *root,
  247. struct btrfs_fs_info *fs_info,
  248. u64 objectid)
  249. {
  250. root->node = NULL;
  251. root->commit_root = NULL;
  252. root->blocksize = btrfs_super_blocksize(super);
  253. root->ref_cows = 0;
  254. root->fs_info = fs_info;
  255. memset(&root->root_key, 0, sizeof(root->root_key));
  256. memset(&root->root_item, 0, sizeof(root->root_item));
  257. return 0;
  258. }
  259. static int find_and_setup_root(struct btrfs_super_block *super,
  260. struct btrfs_root *tree_root,
  261. struct btrfs_fs_info *fs_info,
  262. u64 objectid,
  263. struct btrfs_root *root)
  264. {
  265. int ret;
  266. __setup_root(super, root, fs_info, objectid);
  267. ret = btrfs_find_last_root(tree_root, objectid,
  268. &root->root_item, &root->root_key);
  269. BUG_ON(ret);
  270. root->node = read_tree_block(root,
  271. btrfs_root_blocknr(&root->root_item));
  272. BUG_ON(!root->node);
  273. return 0;
  274. }
  275. struct btrfs_root *open_ctree(struct super_block *sb,
  276. struct buffer_head *sb_buffer,
  277. struct btrfs_super_block *disk_super)
  278. {
  279. struct btrfs_root *root = kmalloc(sizeof(struct btrfs_root),
  280. GFP_NOFS);
  281. struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
  282. GFP_NOFS);
  283. struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
  284. GFP_NOFS);
  285. struct btrfs_root *inode_root = kmalloc(sizeof(struct btrfs_root),
  286. GFP_NOFS);
  287. struct btrfs_fs_info *fs_info = kmalloc(sizeof(*fs_info),
  288. GFP_NOFS);
  289. int ret;
  290. if (!btrfs_super_root(disk_super)) {
  291. return NULL;
  292. }
  293. init_bit_radix(&fs_info->pinned_radix);
  294. init_bit_radix(&fs_info->pending_del_radix);
  295. sb_set_blocksize(sb, sb_buffer->b_size);
  296. fs_info->running_transaction = NULL;
  297. fs_info->fs_root = root;
  298. fs_info->tree_root = tree_root;
  299. fs_info->extent_root = extent_root;
  300. fs_info->inode_root = inode_root;
  301. fs_info->last_inode_alloc = 0;
  302. fs_info->last_inode_alloc_dirid = 0;
  303. fs_info->disk_super = disk_super;
  304. fs_info->sb = sb;
  305. fs_info->btree_inode = new_inode(sb);
  306. fs_info->btree_inode->i_ino = 1;
  307. fs_info->btree_inode->i_size = sb->s_bdev->bd_inode->i_size;
  308. fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
  309. insert_inode_hash(fs_info->btree_inode);
  310. mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
  311. fs_info->hash_tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
  312. spin_lock_init(&fs_info->hash_lock);
  313. if (!fs_info->hash_tfm || IS_ERR(fs_info->hash_tfm)) {
  314. printk("failed to allocate sha256 hash\n");
  315. return NULL;
  316. }
  317. mutex_init(&fs_info->trans_mutex);
  318. mutex_init(&fs_info->fs_mutex);
  319. memset(&fs_info->current_insert, 0, sizeof(fs_info->current_insert));
  320. memset(&fs_info->last_insert, 0, sizeof(fs_info->last_insert));
  321. __setup_root(disk_super, tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
  322. fs_info->sb_buffer = read_tree_block(tree_root, sb_buffer->b_blocknr);
  323. if (!fs_info->sb_buffer) {
  324. printk("failed2\n");
  325. return NULL;
  326. }
  327. brelse(sb_buffer);
  328. sb_buffer = NULL;
  329. disk_super = (struct btrfs_super_block *)fs_info->sb_buffer->b_data;
  330. fs_info->disk_super = disk_super;
  331. tree_root->node = read_tree_block(tree_root,
  332. btrfs_super_root(disk_super));
  333. BUG_ON(!tree_root->node);
  334. ret = find_and_setup_root(disk_super, tree_root, fs_info,
  335. BTRFS_EXTENT_TREE_OBJECTID, extent_root);
  336. BUG_ON(ret);
  337. ret = find_and_setup_root(disk_super, tree_root, fs_info,
  338. BTRFS_INODE_MAP_OBJECTID, inode_root);
  339. BUG_ON(ret);
  340. ret = find_and_setup_root(disk_super, tree_root, fs_info,
  341. BTRFS_FS_TREE_OBJECTID, root);
  342. BUG_ON(ret);
  343. root->commit_root = root->node;
  344. get_bh(root->node);
  345. root->ref_cows = 1;
  346. root->fs_info->generation = root->root_key.offset + 1;
  347. return root;
  348. }
/*
 * Synchronously write the cached super block back to disk, after
 * pointing it at the current tree root.  Returns 0 on success or -EIO
 * if the write failed.
 */
int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root)
{
	struct buffer_head *bh = root->fs_info->sb_buffer;

	btrfs_set_super_root(root->fs_info->disk_super,
			     root->fs_info->tree_root->node->b_blocknr);
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	csum_tree_block(root, bh, 0);	/* currently a no-op */
	/* extra ref for the in-flight I/O; end_buffer_write_sync drops
	 * it and unlocks the buffer on completion */
	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);
	submit_bh(WRITE, bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		WARN_ON(1);
		return -EIO;
	}
	return 0;
}
/*
 * Commit outstanding work and tear down everything built by
 * open_ctree().  Always returns 0.
 */
int close_ctree(struct btrfs_root *root)
{
	int ret;
	struct btrfs_trans_handle *trans;

	trans = btrfs_start_transaction(root, 1);
	btrfs_commit_transaction(trans, root);
	/* run commit again to drop the original snapshot */
	trans = btrfs_start_transaction(root, 1);
	btrfs_commit_transaction(trans, root);
	ret = btrfs_write_and_wait_transaction(NULL, root);
	BUG_ON(ret);
	write_ctree_super(NULL, root);	/* return value ignored */
	/* drop the root-node references taken during setup */
	if (root->node)
		btrfs_block_release(root, root->node);
	if (root->fs_info->extent_root->node)
		btrfs_block_release(root->fs_info->extent_root,
				    root->fs_info->extent_root->node);
	if (root->fs_info->inode_root->node)
		btrfs_block_release(root->fs_info->inode_root,
				    root->fs_info->inode_root->node);
	if (root->fs_info->tree_root->node)
		btrfs_block_release(root->fs_info->tree_root,
				    root->fs_info->tree_root->node);
	btrfs_block_release(root, root->commit_root);
	btrfs_block_release(root, root->fs_info->sb_buffer);
	crypto_free_hash(root->fs_info->hash_tfm);
	/* drop all cached tree blocks before the inode goes away */
	truncate_inode_pages(root->fs_info->btree_inode->i_mapping, 0);
	iput(root->fs_info->btree_inode);
	kfree(root->fs_info->extent_root);
	kfree(root->fs_info->inode_root);
	kfree(root->fs_info->tree_root);
	kfree(root->fs_info);
	kfree(root);
	return 0;
}
  403. void btrfs_block_release(struct btrfs_root *root, struct buffer_head *buf)
  404. {
  405. check_pattern(buf);
  406. brelse(buf);
  407. }