/* disk-io.c — btrfs tree block I/O through the buffer cache */
  1. #include <linux/module.h>
  2. #include <linux/fs.h>
  3. #include <linux/blkdev.h>
  4. #include <linux/crypto.h>
  5. #include <linux/scatterlist.h>
  6. #include <linux/swap.h>
  7. #include "ctree.h"
  8. #include "disk-io.h"
  9. #include "transaction.h"
  10. static int check_tree_block(struct btrfs_root *root, struct buffer_head *buf)
  11. {
  12. struct btrfs_node *node = btrfs_buffer_node(buf);
  13. if (buf->b_blocknr != btrfs_header_blocknr(&node->header)) {
  14. BUG();
  15. }
  16. if (root->node && btrfs_header_parentid(&node->header) !=
  17. btrfs_header_parentid(btrfs_buffer_header(root->node))) {
  18. BUG();
  19. }
  20. return 0;
  21. }
  22. struct buffer_head *btrfs_find_tree_block(struct btrfs_root *root, u64 blocknr)
  23. {
  24. return sb_find_get_block(root->fs_info->sb, blocknr);
  25. #if 0
  26. struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
  27. int blockbits = root->fs_info->sb->s_blocksize_bits;
  28. unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
  29. struct page *page;
  30. struct buffer_head *bh;
  31. struct buffer_head *head;
  32. struct buffer_head *ret = NULL;
  33. page = find_lock_page(mapping, index);
  34. if (!page)
  35. return NULL;
  36. if (!page_has_buffers(page))
  37. goto out_unlock;
  38. head = page_buffers(page);
  39. bh = head;
  40. do {
  41. if (buffer_mapped(bh) && bh->b_blocknr == blocknr) {
  42. ret = bh;
  43. get_bh(bh);
  44. goto out_unlock;
  45. }
  46. bh = bh->b_this_page;
  47. } while (bh != head);
  48. out_unlock:
  49. unlock_page(page);
  50. if (ret) {
  51. touch_buffer(ret);
  52. }
  53. page_cache_release(page);
  54. return ret;
  55. #endif
  56. }
  57. struct buffer_head *btrfs_find_create_tree_block(struct btrfs_root *root,
  58. u64 blocknr)
  59. {
  60. return sb_getblk(root->fs_info->sb, blocknr);
  61. #if 0
  62. struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
  63. int blockbits = root->fs_info->sb->s_blocksize_bits;
  64. unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
  65. struct page *page;
  66. struct buffer_head *bh;
  67. struct buffer_head *head;
  68. struct buffer_head *ret = NULL;
  69. u64 first_block = index << (PAGE_CACHE_SHIFT - blockbits);
  70. page = grab_cache_page(mapping, index);
  71. if (!page)
  72. return NULL;
  73. if (!page_has_buffers(page))
  74. create_empty_buffers(page, root->fs_info->sb->s_blocksize, 0);
  75. head = page_buffers(page);
  76. bh = head;
  77. do {
  78. if (!buffer_mapped(bh)) {
  79. bh->b_bdev = root->fs_info->sb->s_bdev;
  80. bh->b_blocknr = first_block;
  81. set_buffer_mapped(bh);
  82. }
  83. if (bh->b_blocknr == blocknr) {
  84. ret = bh;
  85. get_bh(bh);
  86. goto out_unlock;
  87. }
  88. bh = bh->b_this_page;
  89. first_block++;
  90. } while (bh != head);
  91. out_unlock:
  92. unlock_page(page);
  93. if (ret)
  94. touch_buffer(ret);
  95. page_cache_release(page);
  96. return ret;
  97. #endif
  98. }
  99. static sector_t max_block(struct block_device *bdev)
  100. {
  101. sector_t retval = ~((sector_t)0);
  102. loff_t sz = i_size_read(bdev->bd_inode);
  103. if (sz) {
  104. unsigned int size = block_size(bdev);
  105. unsigned int sizebits = blksize_bits(size);
  106. retval = (sz >> sizebits);
  107. }
  108. return retval;
  109. }
  110. static int btree_get_block(struct inode *inode, sector_t iblock,
  111. struct buffer_head *bh, int create)
  112. {
  113. if (iblock >= max_block(inode->i_sb->s_bdev)) {
  114. if (create)
  115. return -EIO;
  116. /*
  117. * for reads, we're just trying to fill a partial page.
  118. * return a hole, they will have to call get_block again
  119. * before they can fill it, and they will get -EIO at that
  120. * time
  121. */
  122. return 0;
  123. }
  124. bh->b_bdev = inode->i_sb->s_bdev;
  125. bh->b_blocknr = iblock;
  126. set_buffer_mapped(bh);
  127. return 0;
  128. }
  129. int btrfs_csum_data(struct btrfs_root * root, char *data, size_t len,
  130. char *result)
  131. {
  132. struct scatterlist sg;
  133. struct crypto_hash *tfm = root->fs_info->hash_tfm;
  134. struct hash_desc desc;
  135. int ret;
  136. desc.tfm = tfm;
  137. desc.flags = 0;
  138. sg_init_one(&sg, data, len);
  139. spin_lock(&root->fs_info->hash_lock);
  140. ret = crypto_hash_digest(&desc, &sg, 1, result);
  141. spin_unlock(&root->fs_info->hash_lock);
  142. if (ret) {
  143. printk("sha256 digest failed\n");
  144. }
  145. return ret;
  146. }
/*
 * Compute (verify == 0) or verify (verify != 0) the checksum of a tree
 * block.  The checksum covers everything after the csum field itself
 * and is stored at the front of the block.
 *
 * NOTE(review): the bare "return 0" below short-circuits the whole
 * function, so checksumming is currently disabled — presumably a
 * temporary debugging measure; confirm before relying on on-disk csums.
 */
static int csum_tree_block(struct btrfs_root *root, struct buffer_head *bh,
			   int verify)
{
	char result[BTRFS_CSUM_SIZE];
	int ret;
	struct btrfs_node *node;
	/* checksumming disabled: everything below is unreachable */
	return 0;
	ret = btrfs_csum_data(root, bh->b_data + BTRFS_CSUM_SIZE,
			      bh->b_size - BTRFS_CSUM_SIZE, result);
	if (ret)
		return ret;
	if (verify) {
		/* on-disk csum lives at the start of the block */
		if (memcmp(bh->b_data, result, BTRFS_CSUM_SIZE)) {
			printk("checksum verify failed on %lu\n",
			       bh->b_blocknr);
			return 1;
		}
	} else {
		/* writing: stamp the freshly computed csum into the header */
		node = btrfs_buffer_node(bh);
		memcpy(node->header.csum, result, BTRFS_CSUM_SIZE);
	}
	return 0;
}
  170. static int btree_writepage(struct page *page, struct writeback_control *wbc)
  171. {
  172. #if 0
  173. struct buffer_head *bh;
  174. struct btrfs_root *root = btrfs_sb(page->mapping->host->i_sb);
  175. struct buffer_head *head;
  176. if (!page_has_buffers(page)) {
  177. create_empty_buffers(page, root->fs_info->sb->s_blocksize,
  178. (1 << BH_Dirty)|(1 << BH_Uptodate));
  179. }
  180. head = page_buffers(page);
  181. bh = head;
  182. do {
  183. if (buffer_dirty(bh))
  184. csum_tree_block(root, bh, 0);
  185. bh = bh->b_this_page;
  186. } while (bh != head);
  187. #endif
  188. return block_write_full_page(page, btree_get_block, wbc);
  189. }
  190. static int btree_readpage(struct file * file, struct page * page)
  191. {
  192. return block_read_full_page(page, btree_get_block);
  193. }
/*
 * Address space operations for the btree inode: all metadata I/O is
 * funneled through the generic buffer-head read/write paths above.
 */
static struct address_space_operations btree_aops = {
	.readpage = btree_readpage,
	.writepage = btree_writepage,
	.sync_page = block_sync_page,
};
  199. struct buffer_head *read_tree_block(struct btrfs_root *root, u64 blocknr)
  200. {
  201. return sb_bread(root->fs_info->sb, blocknr);
  202. #if 0
  203. struct buffer_head *bh = NULL;
  204. bh = btrfs_find_create_tree_block(root, blocknr);
  205. if (!bh)
  206. return bh;
  207. lock_buffer(bh);
  208. if (!buffer_uptodate(bh)) {
  209. get_bh(bh);
  210. bh->b_end_io = end_buffer_read_sync;
  211. submit_bh(READ, bh);
  212. wait_on_buffer(bh);
  213. if (!buffer_uptodate(bh))
  214. goto fail;
  215. csum_tree_block(root, bh, 1);
  216. } else {
  217. unlock_buffer(bh);
  218. }
  219. if (check_tree_block(root, bh))
  220. BUG();
  221. return bh;
  222. fail:
  223. brelse(bh);
  224. return NULL;
  225. #endif
  226. }
/*
 * Mark a tree block dirty so writeback will push it to disk.  The
 * caller must already hold a reference on the buffer.  @trans is
 * currently unused.  Always returns 0.
 */
int dirty_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		     struct buffer_head *buf)
{
	/* dirtying an unreferenced buffer indicates a refcount bug */
	WARN_ON(atomic_read(&buf->b_count) == 0);
	mark_buffer_dirty(buf);
	return 0;
}
/*
 * Clear the dirty bit on a tree block, cancelling any pending
 * writeback.  The caller must hold a reference on the buffer.  @trans
 * is currently unused.  Always returns 0.
 */
int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		     struct buffer_head *buf)
{
	/* cleaning an unreferenced buffer indicates a refcount bug */
	WARN_ON(atomic_read(&buf->b_count) == 0);
	clear_buffer_dirty(buf);
	return 0;
}
  241. static int __setup_root(int blocksize,
  242. struct btrfs_root *root,
  243. struct btrfs_fs_info *fs_info,
  244. u64 objectid)
  245. {
  246. root->node = NULL;
  247. root->commit_root = NULL;
  248. root->blocksize = blocksize;
  249. root->ref_cows = 0;
  250. root->fs_info = fs_info;
  251. memset(&root->root_key, 0, sizeof(root->root_key));
  252. memset(&root->root_item, 0, sizeof(root->root_item));
  253. return 0;
  254. }
  255. static int find_and_setup_root(int blocksize,
  256. struct btrfs_root *tree_root,
  257. struct btrfs_fs_info *fs_info,
  258. u64 objectid,
  259. struct btrfs_root *root)
  260. {
  261. int ret;
  262. __setup_root(blocksize, root, fs_info, objectid);
  263. ret = btrfs_find_last_root(tree_root, objectid,
  264. &root->root_item, &root->root_key);
  265. BUG_ON(ret);
  266. root->node = read_tree_block(root,
  267. btrfs_root_blocknr(&root->root_item));
  268. BUG_ON(!root->node);
  269. return 0;
  270. }
  271. struct btrfs_root *open_ctree(struct super_block *sb)
  272. {
  273. struct btrfs_root *root = kmalloc(sizeof(struct btrfs_root),
  274. GFP_NOFS);
  275. struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
  276. GFP_NOFS);
  277. struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
  278. GFP_NOFS);
  279. struct btrfs_root *inode_root = kmalloc(sizeof(struct btrfs_root),
  280. GFP_NOFS);
  281. struct btrfs_fs_info *fs_info = kmalloc(sizeof(*fs_info),
  282. GFP_NOFS);
  283. int ret;
  284. struct btrfs_super_block *disk_super;
  285. init_bit_radix(&fs_info->pinned_radix);
  286. init_bit_radix(&fs_info->pending_del_radix);
  287. sb_set_blocksize(sb, 4096);
  288. fs_info->running_transaction = NULL;
  289. fs_info->fs_root = root;
  290. fs_info->tree_root = tree_root;
  291. fs_info->extent_root = extent_root;
  292. fs_info->inode_root = inode_root;
  293. fs_info->last_inode_alloc = 0;
  294. fs_info->last_inode_alloc_dirid = 0;
  295. fs_info->sb = sb;
  296. fs_info->btree_inode = NULL;
  297. #if 0
  298. fs_info->btree_inode = new_inode(sb);
  299. fs_info->btree_inode->i_ino = 1;
  300. fs_info->btree_inode->i_nlink = 1;
  301. fs_info->btree_inode->i_size = sb->s_bdev->bd_inode->i_size;
  302. fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
  303. insert_inode_hash(fs_info->btree_inode);
  304. mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
  305. #endif
  306. fs_info->hash_tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
  307. spin_lock_init(&fs_info->hash_lock);
  308. if (!fs_info->hash_tfm || IS_ERR(fs_info->hash_tfm)) {
  309. printk("failed to allocate sha256 hash\n");
  310. return NULL;
  311. }
  312. mutex_init(&fs_info->trans_mutex);
  313. mutex_init(&fs_info->fs_mutex);
  314. memset(&fs_info->current_insert, 0, sizeof(fs_info->current_insert));
  315. memset(&fs_info->last_insert, 0, sizeof(fs_info->last_insert));
  316. __setup_root(sb->s_blocksize, tree_root,
  317. fs_info, BTRFS_ROOT_TREE_OBJECTID);
  318. fs_info->sb_buffer = read_tree_block(tree_root,
  319. BTRFS_SUPER_INFO_OFFSET /
  320. sb->s_blocksize);
  321. if (!fs_info->sb_buffer) {
  322. printk("failed2\n");
  323. return NULL;
  324. }
  325. disk_super = (struct btrfs_super_block *)fs_info->sb_buffer->b_data;
  326. if (!btrfs_super_root(disk_super)) {
  327. return NULL;
  328. }
  329. fs_info->disk_super = disk_super;
  330. tree_root->node = read_tree_block(tree_root,
  331. btrfs_super_root(disk_super));
  332. BUG_ON(!tree_root->node);
  333. mutex_lock(&fs_info->fs_mutex);
  334. ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
  335. BTRFS_EXTENT_TREE_OBJECTID, extent_root);
  336. BUG_ON(ret);
  337. ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
  338. BTRFS_INODE_MAP_OBJECTID, inode_root);
  339. BUG_ON(ret);
  340. ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
  341. BTRFS_FS_TREE_OBJECTID, root);
  342. mutex_unlock(&fs_info->fs_mutex);
  343. BUG_ON(ret);
  344. root->commit_root = root->node;
  345. get_bh(root->node);
  346. root->ref_cows = 1;
  347. root->fs_info->generation = root->root_key.offset + 1;
  348. return root;
  349. }
/*
 * Write the super block buffer to disk synchronously, after pointing it
 * at the current root-tree block.
 *
 * Returns 0 on success or -EIO if the write did not leave the buffer
 * uptodate.  @trans is currently unused.
 *
 * The submit sequence is order-sensitive: clear dirty and (re)checksum
 * under the buffer lock, take an extra reference for the I/O, then
 * submit and wait.  submit_bh unlocks the buffer on completion.
 */
int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root)
{
	struct buffer_head *bh = root->fs_info->sb_buffer;
	/* record the current root tree location in the super block */
	btrfs_set_super_root(root->fs_info->disk_super,
			     root->fs_info->tree_root->node->b_blocknr);
	lock_buffer(bh);
	WARN_ON(atomic_read(&bh->b_count) < 1);
	clear_buffer_dirty(bh);
	/* note: csum_tree_block is currently a no-op (disabled) */
	csum_tree_block(root, bh, 0);
	bh->b_end_io = end_buffer_write_sync;
	/* extra ref for the in-flight I/O; dropped by end_io */
	get_bh(bh);
	submit_bh(WRITE, bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		WARN_ON(1);
		return -EIO;
	}
	return 0;
}
/*
 * Unmount-time teardown: commit outstanding work, write the super
 * block, release every cached root block, and free the in-memory
 * structures allocated by open_ctree.
 *
 * Two commits are required: the first commits pending changes, the
 * second drops the snapshot the first one created.  Always returns 0.
 */
int close_ctree(struct btrfs_root *root)
{
	int ret;
	struct btrfs_trans_handle *trans;
	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	btrfs_commit_transaction(trans, root);
	/* run commit again to drop the original snapshot */
	trans = btrfs_start_transaction(root, 1);
	btrfs_commit_transaction(trans, root);
	ret = btrfs_write_and_wait_transaction(NULL, root);
	BUG_ON(ret);
	/* point the super at the final tree root and flush it */
	write_ctree_super(NULL, root);
	mutex_unlock(&root->fs_info->fs_mutex);
	/* drop the cached top blocks of each tree, then the extra
	 * commit_root and super block references */
	if (root->node)
		btrfs_block_release(root, root->node);
	if (root->fs_info->extent_root->node)
		btrfs_block_release(root->fs_info->extent_root,
				    root->fs_info->extent_root->node);
	if (root->fs_info->inode_root->node)
		btrfs_block_release(root->fs_info->inode_root,
				    root->fs_info->inode_root->node);
	if (root->fs_info->tree_root->node)
		btrfs_block_release(root->fs_info->tree_root,
				    root->fs_info->tree_root->node);
	btrfs_block_release(root, root->commit_root);
	btrfs_block_release(root, root->fs_info->sb_buffer);
	crypto_free_hash(root->fs_info->hash_tfm);
	// truncate_inode_pages(root->fs_info->btree_inode->i_mapping, 0);
	// iput(root->fs_info->btree_inode);
	/* free structures in the reverse of open_ctree's allocation order */
	kfree(root->fs_info->extent_root);
	kfree(root->fs_info->inode_root);
	kfree(root->fs_info->tree_root);
	kfree(root->fs_info);
	kfree(root);
	return 0;
}
/*
 * Drop a reference on a tree block buffer.  The brelse() is commented
 * out, making this intentionally a no-op for now.
 *
 * NOTE(review): with brelse disabled, the references taken by
 * sb_bread/get_bh elsewhere in this file are never dropped here —
 * presumably a temporary debugging state; confirm before shipping.
 */
void btrfs_block_release(struct btrfs_root *root, struct buffer_head *buf)
{
	// brelse(buf);
}