disk-io.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/version.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* for block_sync_page */
#include <linux/workqueue.h>
#include <linux/kthread.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"

#if 0
static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
{
	if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) {
		printk(KERN_CRIT "buf blocknr(buf) is %llu, header is %llu\n",
		       (unsigned long long)extent_buffer_blocknr(buf),
		       (unsigned long long)btrfs_header_blocknr(buf));
		return 1;
	}
	return 0;
}
#endif

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
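
/*
 * end_io_wq structs hold the original end_io context of a bio while its
 * completion is handed off from interrupt time to a helper thread, where
 * checksum verification can safely be done.
 */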
struct end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	int error;
	int metadata;
	struct list_head list;
	struct btrfs_work work;
};
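
/*
 * async_submit_bio structs carry everything needed to defer a bio submission
 * to the worker threads, so the checksumming done at submit time can run in
 * parallel across all CPUs.
 */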
struct async_submit_bio {
	struct inode *inode;
	struct bio *bio;
	struct list_head list;
	extent_submit_bio_hook_t *submit_bio_hook;
	int rw;
	int mirror_num;
	struct btrfs_work work;
};
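
/*
 * The btree inode maps the device address space 1:1, so a single extent_map
 * covering [0, (u64)-1) is enough for every tree block; it is created here
 * on first use.
 */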
struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
				    size_t page_offset, u64 start, u64 len,
				    int create)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev =
			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
		spin_unlock(&em_tree->lock);
		goto out;
	}
	spin_unlock(&em_tree->lock);

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_start = 0;
	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	if (ret == -EEXIST) {
		u64 failed_start = em->start;
		u64 failed_len = em->len;

		printk("failed to insert %Lu %Lu -> %Lu into tree\n",
		       em->start, em->len, em->block_start);
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (em) {
			printk("after failing, found %Lu %Lu %Lu\n",
			       em->start, em->len, em->block_start);
			ret = 0;
		} else {
			em = lookup_extent_mapping(em_tree, failed_start,
						   failed_len);
			if (em) {
				printk("double failure lookup gives us "
				       "%Lu %Lu -> %Lu\n", em->start,
				       em->len, em->block_start);
				free_extent_map(em);
			}
			ret = -EIO;
		}
	} else if (ret) {
		free_extent_map(em);
		em = NULL;
	}
	spin_unlock(&em_tree->lock);

	if (ret)
		em = ERR_PTR(ret);
out:
	return em;
}

u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
	return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
	*(__le32 *)result = ~cpu_to_le32(crc);
}
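
/*
 * Compute the crc32c of a tree block, skipping the csum field itself, and
 * either store it in the header (verify == 0) or compare it against what is
 * already there (verify == 1).
 */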
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
			   int verify)
{
	char result[BTRFS_CRC32_SIZE];
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *map_token = NULL;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
						&map_token, &kaddr,
						&map_start, &map_len, KM_USER0);
		if (err) {
			printk("failed to map extent buffer! %lu\n",
			       offset);
			return 1;
		}
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(root, kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
		unmap_extent_buffer(buf, map_token, KM_USER0);
	}
	btrfs_csum_final(crc, result);

	if (verify) {
		int from_this_trans = 0;

		if (root->fs_info->running_transaction &&
		    btrfs_header_generation(buf) ==
		    root->fs_info->running_transaction->transid)
			from_this_trans = 1;

		/* FIXME, this is not good */
		if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, BTRFS_CRC32_SIZE);
			read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE);
			printk("btrfs: %s checksum verify failed on %llu "
			       "wanted %X found %X from_this_trans %d "
			       "level %d\n",
			       root->fs_info->sb->s_id,
			       buf->start, val, found, from_this_trans,
			       btrfs_header_level(buf));
			return 1;
		}
	} else {
		write_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE);
	}
	return 0;
}
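
/*
 * Make sure the generation recorded in a tree block's header matches the
 * transid the parent pointer expects; failing this means we read a stale
 * copy of the block.
 */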
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid)
{
	int ret;

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
	if (extent_buffer_uptodate(io_tree, eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	printk("parent transid verify failed on %llu wanted %llu found %llu\n",
	       (unsigned long long)eb->start,
	       (unsigned long long)parent_transid,
	       (unsigned long long)btrfs_header_generation(eb));
	ret = 1;
	/* only clear the uptodate bit on the failure path, otherwise a
	 * buffer that just passed verification would be re-read
	 */
	clear_extent_buffer_uptodate(io_tree, eb);
out:
	unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
		      GFP_NOFS);
	return ret;
}
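
/*
 * Read a tree block and retry the other mirrors if the copy we got fails
 * the checksum or parent transid checks.
 */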
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
					  struct extent_buffer *eb,
					  u64 start, u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;

	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, start, 1,
					       btree_get_extent, mirror_num);
		if (!ret &&
		    !verify_parent_transid(io_tree, eb, parent_transid))
			return ret;

		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
					      eb->start, eb->len);
		if (num_copies == 1)
			return ret;

		mirror_num++;
		if (mirror_num > num_copies)
			return ret;
	}
	return -EIO;
}
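
/*
 * Checksum a dirty tree block before it is written out; called from the
 * btree writepage hook while the page is locked for IO.
 */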
int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
	struct extent_io_tree *tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 found_start;
	int found_level;
	unsigned long len;
	struct extent_buffer *eb;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;

	if (page->private == EXTENT_PAGE_PRIVATE)
		goto out;
	if (!page->private)
		goto out;
	len = page->private >> 2;
	if (len == 0) {
		WARN_ON(1);
	}
	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
					     btrfs_header_generation(eb));
	BUG_ON(ret);
	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
		       start, found_start, len);
		WARN_ON(1);
		goto err;
	}
	if (eb->first_page != page) {
		printk("bad first page %lu %lu\n", eb->first_page->index,
		       page->index);
		WARN_ON(1);
		goto err;
	}
	if (!PageUptodate(page)) {
		printk("csum not up to date page %lu\n", page->index);
		WARN_ON(1);
		goto err;
	}
	found_level = btrfs_header_level(eb);
	spin_lock(&root->fs_info->hash_lock);
	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
	spin_unlock(&root->fs_info->hash_lock);
	csum_tree_block(root, eb, 0);
err:
	free_extent_buffer(eb);
out:
	return 0;
}

static int btree_writepage_io_hook(struct page *page, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

	csum_dirty_buffer(root, page);
	return 0;
}

int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	struct extent_io_tree *tree;
	u64 found_start;
	int found_level;
	unsigned long len;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	int ret = 0;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (page->private == EXTENT_PAGE_PRIVATE)
		goto out;
	if (!page->private)
		goto out;
	len = page->private >> 2;
	if (len == 0) {
		WARN_ON(1);
	}
	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		ret = -EIO;
		goto err;
	}
	if (eb->first_page != page) {
		printk("bad first page %lu %lu\n", eb->first_page->index,
		       page->index);
		WARN_ON(1);
		ret = -EIO;
		goto err;
	}
	if (memcmp_extent_buffer(eb, root->fs_info->fsid,
				 (unsigned long)btrfs_header_fsid(eb),
				 BTRFS_FSID_SIZE)) {
		printk("bad fsid on block %Lu\n", eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);

	ret = csum_tree_block(root, eb, 1);
	if (ret)
		ret = -EIO;

	end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
	end = eb->start + end - 1;
	release_extent_buffer_tail_pages(eb);
err:
	free_extent_buffer(eb);
out:
	return ret;
}
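
/*
 * Bio completion handler; it can run at interrupt time, so all it does is
 * pick the right helper thread pool and queue the real work for
 * end_workqueue_fn().
 */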
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_workqueue_bio(struct bio *bio, int err)
#else
static int end_workqueue_bio(struct bio *bio,
			     unsigned int bytes_done, int err)
#endif
{
	struct end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	fs_info = end_io_wq->info;
	end_io_wq->error = err;
	end_io_wq->work.func = end_workqueue_fn;
	end_io_wq->work.flags = 0;
	if (bio->bi_rw & (1 << BIO_RW))
		btrfs_queue_worker(&fs_info->endio_write_workers,
				   &end_io_wq->work);
	else
		btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			int metadata)
{
	struct end_io_wq *end_io_wq;

	end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
	if (!end_io_wq)
		return -ENOMEM;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->error = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

static void run_one_async_submit(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;
	atomic_dec(&fs_info->nr_async_submits);
	async->submit_bio_hook(async->inode, async->rw, async->bio,
			       async->mirror_num);
	kfree(async);
}
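
/*
 * Hand a bio to the worker threads for submission instead of submitting it
 * directly, spreading the submit-time checksumming across CPUs.
 */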
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			extent_submit_bio_hook_t *submit_bio_hook)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->inode = inode;
	async->rw = rw;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_hook = submit_bio_hook;
	async->work.func = run_one_async_submit;
	async->work.flags = 0;
	atomic_inc(&fs_info->nr_async_submits);
	btrfs_queue_worker(&fs_info->workers, &async->work);
	return 0;
}

static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 offset;
	int ret;

	offset = bio->bi_sector << 9;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	if (rw & (1 << BIO_RW)) {
		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				     mirror_num, 0);
	}

	/*
	 * called for a read, do the setup so that checksum validation
	 * can happen in the async kernel threads
	 */
	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);
	BUG_ON(ret);
	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num)
{
	/*
	 * kthread helpers are used to submit writes so that checksumming
	 * can happen in parallel across all CPUs
	 */
	if (!(rw & (1 << BIO_RW))) {
		return __btree_submit_bio_hook(inode, rw, bio, mirror_num);
	}
	return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   __btree_submit_bio_hook);
}

static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_write_full_page(tree, page, btree_get_extent, wbc);
}

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(mapping->host)->io_tree;
	if (wbc->sync_mode == WB_SYNC_NONE) {
		u64 num_dirty;
		u64 start = 0;
		unsigned long thresh = 96 * 1024 * 1024;

		if (wbc->for_kupdate)
			return 0;

		if (current_is_pdflush()) {
			thresh = 96 * 1024 * 1024;
		} else {
			thresh = 8 * 1024 * 1024;
		}
		num_dirty = count_range_bits(tree, &start, (u64)-1,
					     thresh, EXTENT_DIRTY);
		if (num_dirty < thresh) {
			return 0;
		}
	}
	return extent_writepages(tree, mapping, btree_get_extent, wbc);
}

int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	if (page_count(page) > 3) {
		/* once for page->private, once for the caller,
		 * once for the page cache
		 */
		return 0;
	}
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;
	ret = try_release_extent_state(map, tree, page, gfp_flags);
	if (ret == 1) {
		invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
	return ret;
}

static void btree_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}

#if 0
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct buffer_head *head;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, root->fs_info->sb->s_blocksize,
				     (1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	head = page_buffers(page);
	bh = head;
	do {
		if (buffer_dirty(bh))
			csum_tree_block(root, bh, 0);
		bh = bh->b_this_page;
	} while (bh != head);
	return block_write_full_page(page, btree_get_block, wbc);
}
#endif

static struct address_space_operations btree_aops = {
	.readpage = btree_readpage,
	.writepage = btree_writepage,
	.writepages = btree_writepages,
	.releasepage = btree_releasepage,
	.invalidatepage = btree_invalidatepage,
	.sync_page = block_sync_page,
};

int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			 u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	int ret = 0;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, 0, 0, btree_get_extent, 0);
	free_extent_buffer(buf);
	return ret;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
					    u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;

	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				bytenr, blocksize, GFP_NOFS);
	return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
						   u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;

	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				 bytenr, blocksize, NULL, GFP_NOFS);
	return eb;
}

struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
				      u32 blocksize, u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_io_tree *io_tree;
	int ret;

	io_tree = &BTRFS_I(btree_inode)->io_tree;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return NULL;

	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
	if (ret == 0) {
		buf->flags |= EXTENT_UPTODATE;
	}
	return buf;
}

int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		     struct extent_buffer *buf)
{
	struct inode *btree_inode = root->fs_info->btree_inode;

	if (btrfs_header_generation(buf) ==
	    root->fs_info->running_transaction->transid) {
		WARN_ON(!btrfs_tree_locked(buf));
		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
					  buf);
	}
	return 0;
}

int wait_on_tree_block_writeback(struct btrfs_root *root,
				 struct extent_buffer *buf)
{
	struct inode *btree_inode = root->fs_info->btree_inode;

	wait_on_extent_buffer_writeback(&BTRFS_I(btree_inode)->io_tree,
					buf);
	return 0;
}

static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
			u32 stripesize, struct btrfs_root *root,
			struct btrfs_fs_info *fs_info,
			u64 objectid)
{
	root->node = NULL;
	root->inode = NULL;
	root->commit_root = NULL;
	root->sectorsize = sectorsize;
	root->nodesize = nodesize;
	root->leafsize = leafsize;
	root->stripesize = stripesize;
	root->ref_cows = 0;
	root->track_dirty = 0;
	root->fs_info = fs_info;
	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_inode = 0;
	root->last_inode_alloc = 0;
	root->name = NULL;
	root->in_sysfs = 0;

	INIT_LIST_HEAD(&root->dirty_list);
	spin_lock_init(&root->node_lock);
	mutex_init(&root->objectid_mutex);
	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
	root->defrag_trans_start = fs_info->generation;
	init_completion(&root->kobj_unregister);
	root->defrag_running = 0;
	root->defrag_level = 0;
	root->root_key.objectid = objectid;
	return 0;
}

static int find_and_setup_root(struct btrfs_root *tree_root,
			       struct btrfs_fs_info *fs_info,
			       u64 objectid,
			       struct btrfs_root *root)
{
	int ret;
	u32 blocksize;

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	ret = btrfs_find_last_root(tree_root, objectid,
				   &root->root_item, &root->root_key);
	BUG_ON(ret);

	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, 0);
	BUG_ON(!root->node);
	return 0;
}

struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_fs_info *fs_info,
					       struct btrfs_key *location)
{
	struct btrfs_root *root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path;
	struct extent_buffer *l;
	u64 highest_inode;
	u32 blocksize;
	int ret = 0;

	root = kzalloc(sizeof(*root), GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);
	if (location->offset == (u64)-1) {
		ret = find_and_setup_root(tree_root, fs_info,
					  location->objectid, root);
		if (ret) {
			kfree(root);
			return ERR_PTR(ret);
		}
		goto insert;
	}

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, location->objectid);

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
	if (ret != 0) {
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}
	l = path->nodes[0];
	read_extent_buffer(l, &root->root_item,
			   btrfs_item_ptr_offset(l, path->slots[0]),
			   sizeof(root->root_item));
	memcpy(&root->root_key, location, sizeof(*location));
	ret = 0;
out:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	if (ret) {
		kfree(root);
		return ERR_PTR(ret);
	}
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, 0);
	BUG_ON(!root->node);
insert:
	root->ref_cows = 1;
	ret = btrfs_find_highest_inode(root, &highest_inode);
	if (ret == 0) {
		root->highest_inode = highest_inode;
		root->last_inode_alloc = highest_inode;
	}
	return root;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_objectid)
{
	struct btrfs_root *root;

	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;

	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_objectid);
	return root;
}

struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
					      struct btrfs_key *location)
{
	struct btrfs_root *root;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;

	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)location->objectid);
	if (root)
		return root;

	root = btrfs_read_fs_root_no_radix(fs_info, location);
	if (IS_ERR(root))
		return root;
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}
	ret = btrfs_find_dead_roots(fs_info->tree_root,
				    root->root_key.objectid, root);
	BUG_ON(ret);
	return root;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
				      struct btrfs_key *location,
				      const char *name, int namelen)
{
	struct btrfs_root *root;
	int ret;

	root = btrfs_read_fs_root_no_name(fs_info, location);
	if (!root)
		return NULL;

	if (root->in_sysfs)
		return root;

	ret = btrfs_set_root_name(root, name, namelen);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}

	ret = btrfs_sysfs_add_root(root);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root->name);
		kfree(root);
		return ERR_PTR(ret);
	}
	root->in_sysfs = 1;
	return root;
}

#if 0
static int add_hasher(struct btrfs_fs_info *info, char *type)
{
	struct btrfs_hasher *hasher;

	hasher = kmalloc(sizeof(*hasher), GFP_NOFS);
	if (!hasher)
		return -ENOMEM;
	hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC);
	if (!hasher->hash_tfm) {
		kfree(hasher);
		return -EINVAL;
	}
	spin_lock(&info->hash_lock);
	list_add(&hasher->list, &info->hashers);
	spin_unlock(&info->hash_lock);
	return 0;
}
#endif
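
/*
 * Congestion callback for the btrfs backing_dev_info: report congestion when
 * too many async submits are pending, or when any member device's bdi says
 * it is congested.
 */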
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	int limit = 256 * info->fs_devices->open_devices;
	struct list_head *cur;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	if ((bdi_bits & (1 << BDI_write_congested)) &&
	    atomic_read(&info->nr_async_submits) > limit) {
		return 1;
	}

	list_for_each(cur, &info->fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (!device->bdev)
			continue;
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi && bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * this unplugs every device on the box, and it is only used when page
 * is null
 */
static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
	struct list_head *cur;
	struct btrfs_device *device;
	struct btrfs_fs_info *info;

	info = (struct btrfs_fs_info *)bdi->unplug_io_data;
	list_for_each(cur, &info->fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi->unplug_io_fn) {
			bdi->unplug_io_fn(bdi, page);
		}
	}
}

void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
	struct inode *inode;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct address_space *mapping;
	u64 offset;

	/* the generic O_DIRECT read code does this */
	if (!page) {
		__unplug_io_fn(bdi, page);
		return;
	}

	/*
	 * page->mapping may change at any time.  Get a consistent copy
	 * and use that for everything below
	 */
	smp_mb();
	mapping = page->mapping;
	if (!mapping)
		return;

	inode = mapping->host;
	offset = page_offset(page);

	em_tree = &BTRFS_I(inode)->extent_tree;
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
	spin_unlock(&em_tree->lock);
	if (!em)
		return;

	offset = offset - em->start;
	btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
			  em->block_start + offset, page);
	free_extent_map(em);
}

static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
	bdi_init(bdi);
#endif
	bdi->ra_pages = default_backing_dev_info.ra_pages;
	bdi->state = 0;
	bdi->capabilities = default_backing_dev_info.capabilities;
	bdi->unplug_io_fn = btrfs_unplug_io_fn;
	bdi->unplug_io_data = info;
	bdi->congested_fn = btrfs_congested_fn;
	bdi->congested_data = info;
	return 0;
}
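
/*
 * A tree block can span several pages and thus several bios.  Return 1 only
 * once every page of the block is up to date, so that the whole block can
 * be checksummed in one pass.
 */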
static int bio_ready_for_csum(struct bio *bio)
{
	u64 length = 0;
	u64 buf_len = 0;
	u64 start = 0;
	struct page *page;
	struct extent_io_tree *io_tree = NULL;
	struct btrfs_fs_info *info = NULL;
	struct bio_vec *bvec;
	int i;
	int ret;

	bio_for_each_segment(bvec, bio, i) {
		page = bvec->bv_page;
		if (page->private == EXTENT_PAGE_PRIVATE) {
			length += bvec->bv_len;
			continue;
		}
		if (!page->private) {
			length += bvec->bv_len;
			continue;
		}
		length = bvec->bv_len;
		buf_len = page->private >> 2;
		start = page_offset(page) + bvec->bv_offset;
		io_tree = &BTRFS_I(page->mapping->host)->io_tree;
		info = BTRFS_I(page->mapping->host)->root->fs_info;
	}
	/* are we fully contained in this bio? */
	if (buf_len <= length)
		return 1;

	ret = extent_range_uptodate(io_tree, start + length,
				    start + buf_len - 1);
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct end_io_wq *end_io_wq;
	struct btrfs_fs_info *fs_info;
	int error;

	end_io_wq = container_of(work, struct end_io_wq, work);
	bio = end_io_wq->bio;
	fs_info = end_io_wq->info;

	/* metadata bios are special because the whole tree block must
	 * be checksummed at once.  This makes sure the entire block is in
	 * ram and up to date before trying to verify things.  For
	 * blocksize <= pagesize, it is basically a noop
	 */
	if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
		btrfs_queue_worker(&fs_info->endio_workers,
				   &end_io_wq->work);
		return;
	}
	error = end_io_wq->error;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kfree(end_io_wq);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	bio_endio(bio, bio->bi_size, error);
#else
	bio_endio(bio, error);
#endif
}
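
/*
 * Background thread that cleans up old snapshots, sleeping between passes
 * until it is woken up or the filesystem starts closing.
 */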
static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;

	do {
		smp_mb();
		if (root->fs_info->closing)
			break;

		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
		mutex_lock(&root->fs_info->cleaner_mutex);
		btrfs_clean_old_snapshots(root);
		mutex_unlock(&root->fs_info->cleaner_mutex);

		if (freezing(current)) {
			refrigerator();
		} else {
			smp_mb();
			if (root->fs_info->closing)
				break;
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
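
/*
 * Background thread that commits the running transaction once it is more
 * than 30 seconds old, then wakes the cleaner thread.
 */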
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	unsigned long now;
	unsigned long delay;
	int ret;

	do {
		smp_mb();
		if (root->fs_info->closing)
			break;

		delay = HZ * 30;
		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
		mutex_lock(&root->fs_info->transaction_kthread_mutex);

		mutex_lock(&root->fs_info->trans_mutex);
		cur = root->fs_info->running_transaction;
		if (!cur) {
			mutex_unlock(&root->fs_info->trans_mutex);
			goto sleep;
		}
		now = get_seconds();
		if (now < cur->start_time || now - cur->start_time < 30) {
			mutex_unlock(&root->fs_info->trans_mutex);
			delay = HZ * 5;
			goto sleep;
		}
		mutex_unlock(&root->fs_info->trans_mutex);
		trans = btrfs_start_transaction(root, 1);
		ret = btrfs_commit_transaction(trans, root);
sleep:
		wake_up_process(root->fs_info->cleaner_kthread);
		mutex_unlock(&root->fs_info->transaction_kthread_mutex);

		if (freezing(current)) {
			refrigerator();
		} else {
			if (root->fs_info->closing)
				break;
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(delay);
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
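
/*
 * Mount-time setup: read the super block, start the helper threads, and
 * load the chunk, device, root and extent trees into memory.
 */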
struct btrfs_root *open_ctree(struct super_block *sb,
			      struct btrfs_fs_devices *fs_devices,
			      char *options)
{
	u32 sectorsize;
	u32 nodesize;
	u32 leafsize;
	u32 blocksize;
	u32 stripesize;
	struct buffer_head *bh;
	struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
						 GFP_NOFS);
	struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
					       GFP_NOFS);
	struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
						GFP_NOFS);
	struct btrfs_root *chunk_root = kmalloc(sizeof(struct btrfs_root),
						GFP_NOFS);
	struct btrfs_root *dev_root = kmalloc(sizeof(struct btrfs_root),
					      GFP_NOFS);
	int ret;
	int err = -EINVAL;
	struct btrfs_super_block *disk_super;

	if (!extent_root || !tree_root || !fs_info) {
		err = -ENOMEM;
		goto fail;
	}
	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->hashers);
	spin_lock_init(&fs_info->hash_lock);
	spin_lock_init(&fs_info->delalloc_lock);
	spin_lock_init(&fs_info->new_trans_lock);
	init_completion(&fs_info->kobj_unregister);
	fs_info->tree_root = tree_root;
	fs_info->extent_root = extent_root;
	fs_info->chunk_root = chunk_root;
	fs_info->dev_root = dev_root;
	fs_info->fs_devices = fs_devices;
	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
	INIT_LIST_HEAD(&fs_info->space_info);
	btrfs_mapping_init(&fs_info->mapping_tree);
	atomic_set(&fs_info->nr_async_submits, 0);
	atomic_set(&fs_info->throttles, 0);
	fs_info->sb = sb;
	fs_info->max_extent = (u64)-1;
	fs_info->max_inline = 8192 * 1024;
	setup_bdi(fs_info, &fs_info->bdi);
	fs_info->btree_inode = new_inode(sb);
	fs_info->btree_inode->i_ino = 1;
	fs_info->btree_inode->i_nlink = 1;
	fs_info->thread_pool_size = min(num_online_cpus() + 2, 8);

	sb->s_blocksize = 4096;
	sb->s_blocksize_bits = blksize_bits(4096);

	/*
	 * we set the i_size on the btree inode to the max possible int.
	 * the real end of the address space is determined by all of
	 * the devices in the system
	 */
	fs_info->btree_inode->i_size = OFFSET_MAX;
	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
	fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;

	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
			    fs_info->btree_inode->i_mapping,
			    GFP_NOFS);
	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
			     GFP_NOFS);

	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

	extent_io_tree_init(&fs_info->free_space_cache,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->block_group_cache,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->pinned_extents,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->pending_del,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->extent_ins,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	fs_info->do_barriers = 1;

	BTRFS_I(fs_info->btree_inode)->root = tree_root;
	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
	       sizeof(struct btrfs_key));
	insert_inode_hash(fs_info->btree_inode);
	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);

	mutex_init(&fs_info->trans_mutex);
	mutex_init(&fs_info->drop_mutex);
	mutex_init(&fs_info->alloc_mutex);
	mutex_init(&fs_info->chunk_mutex);
	mutex_init(&fs_info->transaction_kthread_mutex);
	mutex_init(&fs_info->cleaner_mutex);
	mutex_init(&fs_info->volume_mutex);
	init_waitqueue_head(&fs_info->transaction_throttle);

#if 0
	ret = add_hasher(fs_info, "crc32c");
	if (ret) {
		printk("btrfs: failed hash setup, modprobe cryptomgr?\n");
		err = -ENOMEM;
		goto fail_iput;
	}
#endif
	__setup_root(4096, 4096, 4096, 4096, tree_root,
		     fs_info, BTRFS_ROOT_TREE_OBJECTID);

	bh = __bread(fs_devices->latest_bdev,
		     BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh)
		goto fail_iput;

	memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
	brelse(bh);

	memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);

	disk_super = &fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		goto fail_sb_buffer;

	err = btrfs_parse_options(tree_root, options);
	if (err)
		goto fail_sb_buffer;

	/*
	 * we need to start all the end_io workers up front because the
	 * queue work function gets called at interrupt time, and so it
	 * cannot dynamically grow.
	 */
	btrfs_init_workers(&fs_info->workers, fs_info->thread_pool_size);
	btrfs_init_workers(&fs_info->submit_workers, fs_info->thread_pool_size);
	btrfs_init_workers(&fs_info->fixup_workers, 1);
	btrfs_init_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
	btrfs_init_workers(&fs_info->endio_write_workers,
			   fs_info->thread_pool_size);
	btrfs_start_workers(&fs_info->workers, 1);
	btrfs_start_workers(&fs_info->submit_workers, 1);
	btrfs_start_workers(&fs_info->fixup_workers, 1);
	btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
	btrfs_start_workers(&fs_info->endio_write_workers,
			    fs_info->thread_pool_size);

	err = -EINVAL;
	if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
		printk("Btrfs: wanted %llu devices, but found %llu\n",
		       (unsigned long long)btrfs_super_num_devices(disk_super),
		       (unsigned long long)fs_devices->open_devices);
		if (btrfs_test_opt(tree_root, DEGRADED))
			printk("continuing in degraded mode\n");
		else {
			goto fail_sb_buffer;
		}
	}

	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);

	nodesize = btrfs_super_nodesize(disk_super);
	leafsize = btrfs_super_leafsize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);
	tree_root->nodesize = nodesize;
	tree_root->leafsize = leafsize;
	tree_root->sectorsize = sectorsize;
	tree_root->stripesize = stripesize;
	sb->s_blocksize = sectorsize;
	sb->s_blocksize_bits = blksize_bits(sectorsize);

	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		printk("btrfs: valid FS not found on %s\n", sb->s_id);
		goto fail_sb_buffer;
	}

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_sys_array(tree_root);
	mutex_unlock(&fs_info->chunk_mutex);
	if (ret) {
		printk("btrfs: failed to read the system array on %s\n",
		       sb->s_id);
		goto fail_sys_array;
	}

	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_chunk_root_level(disk_super));

	__setup_root(nodesize, leafsize, sectorsize, stripesize,
		     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	chunk_root->node = read_tree_block(chunk_root,
					   btrfs_super_chunk_root(disk_super),
					   blocksize, 0);
	BUG_ON(!chunk_root->node);

	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
			   (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
			   BTRFS_UUID_SIZE);

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_chunk_tree(chunk_root);
	mutex_unlock(&fs_info->chunk_mutex);
	BUG_ON(ret);

	btrfs_close_extra_devices(fs_devices);

	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_root_level(disk_super));

	tree_root->node = read_tree_block(tree_root,
					  btrfs_super_root(disk_super),
					  blocksize, 0);
	if (!tree_root->node)
		goto fail_sb_buffer;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
	if (ret)
		goto fail_tree_root;
	extent_root->track_dirty = 1;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_DEV_TREE_OBJECTID, dev_root);
	dev_root->track_dirty = 1;
	if (ret)
		goto fail_extent_root;

	btrfs_read_block_groups(extent_root);

	fs_info->generation = btrfs_super_generation(disk_super) + 1;
	fs_info->data_alloc_profile = (u64)-1;
	fs_info->metadata_alloc_profile = (u64)-1;
	fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
	/* kthread_run returns an ERR_PTR on failure, never NULL */
	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
					       "btrfs-cleaner");
	if (IS_ERR(fs_info->cleaner_kthread))
		goto fail_extent_root;

	fs_info->transaction_kthread = kthread_run(transaction_kthread,
						   tree_root,
						   "btrfs-transaction");
	if (IS_ERR(fs_info->transaction_kthread))
		goto fail_cleaner;

	return tree_root;

fail_cleaner:
	kthread_stop(fs_info->cleaner_kthread);
fail_extent_root:
	free_extent_buffer(extent_root->node);
fail_tree_root:
	free_extent_buffer(tree_root->node);
fail_sys_array:
fail_sb_buffer:
	extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->submit_workers);
fail_iput:
	iput(fs_info->btree_inode);
fail:
	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);
	kfree(extent_root);
	kfree(tree_root);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
	bdi_destroy(&fs_info->bdi);
#endif
	kfree(fs_info);
	return ERR_PTR(err);
}

static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			printk(KERN_WARNING "lost page write due to "
			       "I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		/* note, we don't set_buffer_write_io_error because we have
		 * our own ways of dealing with the IO errors
		 */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
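
/*
 * Write the super block to every device in the filesystem, using a barrier
 * write where the device supports it, then wait for all the IO to complete.
 * Up to max_errors device failures are tolerated before we BUG().
 */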
int write_all_supers(struct btrfs_root *root)
{
	struct list_head *cur;
	struct list_head *head = &root->fs_info->fs_devices->devices;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	struct buffer_head *bh;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u32 crc;
	u64 flags;

	max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	do_barriers = !btrfs_test_opt(root, NOBARRIER);

	sb = &root->fs_info->super_for_commit;
	dev_item = &sb->dev_item;
	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!dev->in_fs_metadata)
			continue;

		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		crc = ~(u32)0;
		crc = btrfs_csum_data(root, (char *)sb + BTRFS_CSUM_SIZE, crc,
				      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, sb->csum);

		bh = __getblk(dev->bdev, BTRFS_SUPER_INFO_OFFSET / 4096,
			      BTRFS_SUPER_INFO_SIZE);
		memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
		dev->pending_io = bh;

		get_bh(bh);
		set_buffer_uptodate(bh);
		lock_buffer(bh);
		bh->b_end_io = btrfs_end_buffer_write_sync;

		if (do_barriers && dev->barriers) {
			ret = submit_bh(WRITE_BARRIER, bh);
			if (ret == -EOPNOTSUPP) {
				printk("btrfs: disabling barriers on dev %s\n",
				       dev->name);
				set_buffer_uptodate(bh);
				dev->barriers = 0;
				get_bh(bh);
				lock_buffer(bh);
				ret = submit_bh(WRITE, bh);
			}
		} else {
			ret = submit_bh(WRITE, bh);
		}
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		printk("btrfs: %d errors while writing supers\n", total_errors);
		BUG();
	}
	total_errors = 0;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (!dev->bdev)
			continue;
		if (!dev->in_fs_metadata)
			continue;

		BUG_ON(!dev->pending_io);
		bh = dev->pending_io;
		wait_on_buffer(bh);
		if (!buffer_uptodate(dev->pending_io)) {
			if (do_barriers && dev->barriers) {
				printk("btrfs: disabling barriers on dev %s\n",
				       dev->name);
				set_buffer_uptodate(bh);
				get_bh(bh);
				lock_buffer(bh);
				dev->barriers = 0;
				ret = submit_bh(WRITE, bh);
				BUG_ON(ret);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh))
					total_errors++;
			} else {
				total_errors++;
			}
		}
		dev->pending_io = NULL;
		brelse(bh);
	}
	if (total_errors > max_errors) {
		printk("btrfs: %d errors while writing supers\n", total_errors);
		BUG();
	}
	return 0;
}

int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root)
{
	int ret;

	ret = write_all_supers(root);
	return ret;
}

int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	if (root->in_sysfs)
		btrfs_sysfs_del_root(root);
	if (root->inode)
		iput(root->inode);
	if (root->node)
		free_extent_buffer(root->node);
	if (root->commit_root)
		free_extent_buffer(root->commit_root);
	if (root->name)
		kfree(root->name);
	kfree(root);
	return 0;
}

static int del_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_free_fs_root(fs_info, gang[i]);
	}
	return 0;
}

int close_ctree(struct btrfs_root *root)
{
	int ret;
	struct btrfs_trans_handle *trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	fs_info->closing = 1;
	smp_mb();

	kthread_stop(root->fs_info->transaction_kthread);
	kthread_stop(root->fs_info->cleaner_kthread);

	btrfs_clean_old_snapshots(root);
	trans = btrfs_start_transaction(root, 1);
	ret = btrfs_commit_transaction(trans, root);
	/* run commit again to drop the original snapshot */
	trans = btrfs_start_transaction(root, 1);
	btrfs_commit_transaction(trans, root);
	ret = btrfs_write_and_wait_transaction(NULL, root);
	BUG_ON(ret);

	write_ctree_super(NULL, root);

	if (fs_info->delalloc_bytes) {
		printk("btrfs: at unmount delalloc count %Lu\n",
		       fs_info->delalloc_bytes);
	}
	if (fs_info->extent_root->node)
		free_extent_buffer(fs_info->extent_root->node);

	if (fs_info->tree_root->node)
		free_extent_buffer(fs_info->tree_root->node);

	if (root->fs_info->chunk_root->node)
		free_extent_buffer(root->fs_info->chunk_root->node);

	if (root->fs_info->dev_root->node)
		free_extent_buffer(root->fs_info->dev_root->node);

	btrfs_free_block_groups(root->fs_info);
	del_fs_roots(fs_info);

	filemap_write_and_wait(fs_info->btree_inode->i_mapping);

	extent_io_tree_empty_lru(&fs_info->free_space_cache);
	extent_io_tree_empty_lru(&fs_info->block_group_cache);
	extent_io_tree_empty_lru(&fs_info->pinned_extents);
	extent_io_tree_empty_lru(&fs_info->pending_del);
	extent_io_tree_empty_lru(&fs_info->extent_ins);
	extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);

	truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);

	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->submit_workers);

	iput(fs_info->btree_inode);
#if 0
	while (!list_empty(&fs_info->hashers)) {
		struct btrfs_hasher *hasher;
		hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
				    hashers);
		list_del(&hasher->hashers);
		crypto_free_hash(&fs_info->hash_tfm);
		kfree(hasher);
	}
#endif
	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
	bdi_destroy(&fs_info->bdi);
#endif
	kfree(fs_info->extent_root);
	kfree(fs_info->tree_root);
	kfree(fs_info->chunk_root);
	kfree(fs_info->dev_root);
	return 0;
}

int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
{
	int ret;
	struct inode *btree_inode = buf->first_page->mapping->host;

	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
	if (!ret)
		return ret;
	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid);
	return !ret;
}

int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
	struct inode *btree_inode = buf->first_page->mapping->host;

	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
					  buf);
}
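
/*
 * Mark a tree block dirty.  The caller must hold the tree lock, and the
 * block's generation must match the running transaction.
 */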
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	u64 transid = btrfs_header_generation(buf);
	struct inode *btree_inode = root->fs_info->btree_inode;

	WARN_ON(!btrfs_tree_locked(buf));
	if (transid != root->fs_info->generation) {
		printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n",
		       (unsigned long long)buf->start,
		       transid, root->fs_info->generation);
		WARN_ON(1);
	}
	set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
}

void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code, they end up stuck in balance_dirty_pages forever
	 */
	struct extent_io_tree *tree;
	u64 num_dirty;
	u64 start = 0;
	unsigned long thresh = 16 * 1024 * 1024;

	tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
	if (current_is_pdflush())
		return;

	num_dirty = count_range_bits(tree, &start, (u64)-1,
				     thresh, EXTENT_DIRTY);
	if (num_dirty > thresh) {
		balance_dirty_pages_ratelimited_nr(
				   root->fs_info->btree_inode->i_mapping, 1);
	}
	return;
}

int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	int ret;

	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
	if (ret == 0) {
		buf->flags |= EXTENT_UPTODATE;
	}
	return ret;
}

static struct extent_io_ops btree_extent_io_ops = {
	.writepage_io_hook = btree_writepage_io_hook,
	.readpage_end_io_hook = btree_readpage_end_io_hook,
	.submit_bio_hook = btree_submit_bio_hook,
	/* note we're sharing with inode.c for the merge bio hook */
	.merge_bio_hook = btrfs_merge_bio_hook,
};