meta_io.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "ops_address.h"

static int aspace_get_block(struct inode *inode, sector_t lblock,
                            struct buffer_head *bh_result, int create)
{
        gfs2_assert_warn(inode->i_sb->s_fs_info, 0);
        return -EOPNOTSUPP;
}

static int gfs2_aspace_writepage(struct page *page,
                                 struct writeback_control *wbc)
{
        return block_write_full_page(page, aspace_get_block, wbc);
}

static const struct address_space_operations aspace_aops = {
        .writepage = gfs2_aspace_writepage,
        .releasepage = gfs2_releasepage,
};
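
/*
 * Descriptive note (inferred from this file, not present in the original):
 * aspace_get_block() exists only to satisfy block_write_full_page().  Every
 * buffer in an aspace is already mapped by getbuf() below via map_bh(), so
 * the get_block callback should never actually be invoked; if it is, it
 * warns and fails with -EOPNOTSUPP rather than trying to map anything.
 */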

/**
 * gfs2_aspace_get - Create and initialize a struct inode structure
 * @sdp: the filesystem the aspace is in
 *
 * Right now a struct inode is just a struct inode. Maybe Linux
 * will supply a more lightweight address space construct (that works)
 * in the future.
 *
 * Make sure pages/buffers in this aspace aren't in high memory.
 *
 * Returns: the aspace
 */

struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
{
        struct inode *aspace;

        aspace = new_inode(sdp->sd_vfs);
        if (aspace) {
                mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
                aspace->i_mapping->a_ops = &aspace_aops;
                aspace->i_size = ~0ULL;
                aspace->i_private = NULL;
                insert_inode_hash(aspace);
        }
        return aspace;
}

void gfs2_aspace_put(struct inode *aspace)
{
        remove_inode_hash(aspace);
        iput(aspace);
}
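
/*
 * Illustrative pairing only (a sketch, not taken from this file): the aspace
 * backs a glock's private page cache for metadata, so a caller that creates
 * one is expected to release it with gfs2_aspace_put() when the owning
 * object goes away, roughly:
 *
 *	aspace = gfs2_aspace_get(sdp);
 *	if (!aspace)
 *		return -ENOMEM;
 *	...
 *	gfs2_aspace_put(aspace);
 */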

/**
 * gfs2_meta_inval - Invalidate all buffers associated with a glock
 * @gl: the glock
 *
 */

void gfs2_meta_inval(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;
        struct address_space *mapping = gl->gl_aspace->i_mapping;

        gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));

        atomic_inc(&aspace->i_writecount);
        truncate_inode_pages(mapping, 0);
        atomic_dec(&aspace->i_writecount);

        gfs2_assert_withdraw(sdp, !mapping->nrpages);
}

/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */

void gfs2_meta_sync(struct gfs2_glock *gl)
{
        struct address_space *mapping = gl->gl_aspace->i_mapping;
        int error;

        filemap_fdatawrite(mapping);
        error = filemap_fdatawait(mapping);

        if (error)
                gfs2_io_error(gl->gl_sbd);
}

/**
 * getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

static struct buffer_head *getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
        struct address_space *mapping = gl->gl_aspace->i_mapping;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct page *page;
        struct buffer_head *bh;
        unsigned int shift;
        unsigned long index;
        unsigned int bufnum;

        shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
        index = blkno >> shift;             /* convert block to page */
        bufnum = blkno - (index << shift);  /* block buf index within page */

        if (create) {
                for (;;) {
                        page = grab_cache_page(mapping, index);
                        if (page)
                                break;
                        yield();
                }
        } else {
                page = find_lock_page(mapping, index);
                if (!page)
                        return NULL;
        }

        if (!page_has_buffers(page))
                create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

        /* Locate header for our buffer within our page */
        for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
                /* Do nothing */;
        get_bh(bh);

        if (!buffer_mapped(bh))
                map_bh(bh, sdp->sd_vfs, blkno);

        unlock_page(page);
        mark_page_accessed(page);
        page_cache_release(page);

        return bh;
}
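
/*
 * Worked example of the index arithmetic above (values assumed purely for
 * illustration): with 4096-byte pages (PAGE_CACHE_SHIFT == 12) and a
 * 1024-byte filesystem block size (sb_bsize_shift == 10), shift == 2 and
 * each page caches four blocks.  Block 4103 then lives at page index
 * 4103 >> 2 == 1025, as buffer 4103 - (1025 << 2) == 3 within that page's
 * buffer_head ring.
 */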

static void meta_prep_new(struct buffer_head *bh)
{
        struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

        lock_buffer(bh);
        clear_buffer_dirty(bh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
        struct buffer_head *bh;
        bh = getbuf(gl, blkno, CREATE);
        meta_prep_new(bh);
        return bh;
}

/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
                   struct buffer_head **bhp)
{
        *bhp = getbuf(gl, blkno, CREATE);
        if (!buffer_uptodate(*bhp))
                ll_rw_block(READ_META, 1, bhp);
        if (flags & DIO_WAIT) {
                int error = gfs2_meta_wait(gl->gl_sbd, *bhp);
                if (error) {
                        brelse(*bhp);
                        return error;
                }
        }

        return 0;
}
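
/*
 * Typical call pattern (illustrative sketch only, not copied from a caller):
 * code that needs the block contents immediately passes DIO_WAIT and drops
 * its reference when done:
 *
 *	struct buffer_head *bh;
 *	int error;
 *
 *	error = gfs2_meta_read(gl, blkno, DIO_WAIT, &bh);
 *	if (error)
 *		return error;
 *	... examine bh->b_data ...
 *	brelse(bh);
 */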

/**
 * gfs2_meta_wait - Wait for an in-flight buffer read to complete
 * @sdp: the filesystem
 * @bh: The buffer to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        wait_on_buffer(bh);

        if (!buffer_uptodate(bh)) {
                struct gfs2_trans *tr = current->journal_info;
                if (tr && tr->tr_touched)
                        gfs2_io_error_bh(sdp, bh);
                return -EIO;
        }
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        return 0;
}

/**
 * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
 * @gl: the glock the buffer belongs to
 * @bh: The buffer to be attached to
 * @meta: Flag to indicate whether it is metadata or not
 */

void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
                         int meta)
{
        struct gfs2_bufdata *bd;

        if (meta)
                lock_page(bh->b_page);

        if (bh->b_private) {
                if (meta)
                        unlock_page(bh->b_page);
                return;
        }

        bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
        bd->bd_bh = bh;
        bd->bd_gl = gl;

        INIT_LIST_HEAD(&bd->bd_list_tr);
        if (meta)
                lops_init_le(&bd->bd_le, &gfs2_buf_lops);
        else
                lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
        bh->b_private = bd;

        if (meta)
                unlock_page(bh->b_page);
}
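
/*
 * Note (based on how the rest of GFS2 uses this, not stated in this file):
 * this is normally reached via gfs2_trans_add_bh() when a buffer is added to
 * a transaction.  The gfs2_bufdata hung off b_private is what later carries
 * the buffer through the log element lists (bd_le) and the AIL.
 */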

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to be pinned
 *
 */

void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd = bh->b_private;

        gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));

        if (test_set_buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);

        wait_on_buffer(bh);

        /* If this buffer is in the AIL and it has already been written
           to its in-place disk block, remove it from the AIL. */

        gfs2_log_lock(sdp);
        if (bd->bd_ail && !buffer_in_io(bh))
                list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
        gfs2_log_unlock(sdp);

        clear_buffer_dirty(bh);
        wait_on_buffer(bh);

        if (!buffer_uptodate(bh))
                gfs2_io_error_bh(sdp, bh);

        get_bh(bh);
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: the AIL entry the buffer is placed on
 *
 */

void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
                struct gfs2_ail *ai)
{
        struct gfs2_bufdata *bd = bh->b_private;

        gfs2_assert_withdraw(sdp, buffer_uptodate(bh));

        if (!buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);

        mark_buffer_dirty(bh);
        clear_buffer_pinned(bh);

        gfs2_log_lock(sdp);
        if (bd->bd_ail) {
                list_del(&bd->bd_ail_st_list);
                brelse(bh);
        } else {
                struct gfs2_glock *gl = bd->bd_gl;
                list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
                atomic_inc(&gl->gl_ail_count);
        }
        bd->bd_ail = ai;
        list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
        gfs2_log_unlock(sdp);
}
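
/*
 * Pin/unpin lifecycle, summarised (descriptive note inferred from the two
 * functions above): gfs2_pin() clears the dirty bit and takes an extra
 * reference so the buffer cannot be written to its in-place location while
 * its contents are queued for the journal; once the log write has committed,
 * gfs2_unpin() re-dirties the buffer and moves it onto the new AIL entry's
 * ai_ail1_list so the in-place write can finally take place.
 */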

/**
 * gfs2_meta_wipe - make sure an inode's buffers are no longer dirty or pinned
 * @ip: the inode who owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */

void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *bh;

        while (blen) {
                bh = getbuf(ip->i_gl, bstart, NO_CREATE);
                if (bh) {
                        struct gfs2_bufdata *bd = bh->b_private;

                        if (test_clear_buffer_pinned(bh)) {
                                struct gfs2_trans *tr = current->journal_info;
                                struct gfs2_inode *bh_ip =
                                        GFS2_I(bh->b_page->mapping->host);
                                gfs2_log_lock(sdp);
                                list_del_init(&bd->bd_le.le_list);
                                gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
                                sdp->sd_log_num_buf--;
                                gfs2_log_unlock(sdp);
                                if (bh_ip->i_inode.i_private != NULL)
                                        tr->tr_num_databuf_rm++;
                                else
                                        tr->tr_num_buf_rm++;
                                brelse(bh);
                        }
                        if (bd) {
                                gfs2_log_lock(sdp);
                                if (bd->bd_ail) {
                                        u64 blkno = bh->b_blocknr;
                                        bd->bd_ail = NULL;
                                        list_del(&bd->bd_ail_st_list);
                                        list_del(&bd->bd_ail_gl_list);
                                        atomic_dec(&bd->bd_gl->gl_ail_count);
                                        brelse(bh);
                                        gfs2_log_unlock(sdp);
                                        gfs2_trans_add_revoke(sdp, blkno);
                                } else
                                        gfs2_log_unlock(sdp);
                        }

                        lock_buffer(bh);
                        clear_buffer_dirty(bh);
                        clear_buffer_uptodate(bh);
                        unlock_buffer(bh);

                        brelse(bh);
                }

                bstart++;
                blen--;
        }
}

/**
 * gfs2_meta_cache_flush - get rid of any references on buffers for this inode
 * @ip: The GFS2 inode
 *
 * This releases buffers that are in the most-recently-used array of
 * blocks used for indirect block addressing for this inode.
 */

void gfs2_meta_cache_flush(struct gfs2_inode *ip)
{
        struct buffer_head **bh_slot;
        unsigned int x;

        spin_lock(&ip->i_spin);

        for (x = 0; x < GFS2_MAX_META_HEIGHT; x++) {
                bh_slot = &ip->i_cache[x];
                if (!*bh_slot)
                        break;
                brelse(*bh_slot);
                *bh_slot = NULL;
        }

        spin_unlock(&ip->i_spin);
}

/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @new: Non-zero if we may create a new buffer
 * @bhp: the buffer is returned here
 *
 * Try to use the gfs2_inode's MRU metadata tree cache.
 *
 * Returns: errno
 */

int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
                              int new, struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_glock *gl = ip->i_gl;
        struct buffer_head *bh = NULL, **bh_slot = ip->i_cache + height;
        int in_cache = 0;

        BUG_ON(!gl);
        BUG_ON(!sdp);

        spin_lock(&ip->i_spin);

        if (*bh_slot && (*bh_slot)->b_blocknr == num) {
                bh = *bh_slot;
                get_bh(bh);
                in_cache = 1;
        }

        spin_unlock(&ip->i_spin);

        if (!bh)
                bh = getbuf(gl, num, CREATE);

        if (!bh)
                return -ENOBUFS;

        if (new) {
                if (gfs2_assert_warn(sdp, height))
                        goto err;
                meta_prep_new(bh);
                gfs2_trans_add_bh(ip->i_gl, bh, 1);
                gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
                gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
        } else {
                u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
                if (!buffer_uptodate(bh)) {
                        ll_rw_block(READ_META, 1, &bh);
                        if (gfs2_meta_wait(sdp, bh))
                                goto err;
                }
                if (gfs2_metatype_check(sdp, bh, mtype))
                        goto err;
        }

        if (!in_cache) {
                spin_lock(&ip->i_spin);
                if (*bh_slot)
                        brelse(*bh_slot);
                *bh_slot = bh;
                get_bh(bh);
                spin_unlock(&ip->i_spin);
        }

        *bhp = bh;
        return 0;
err:
        brelse(bh);
        return -EIO;
}

/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * Returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct buffer_head *first_bh, *bh;
        u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
                     sdp->sd_sb.sb_bsize_shift;

        BUG_ON(!extlen);

        if (max_ra < 1)
                max_ra = 1;
        if (extlen > max_ra)
                extlen = max_ra;

        first_bh = getbuf(gl, dblock, CREATE);

        if (buffer_uptodate(first_bh))
                goto out;
        if (!buffer_locked(first_bh))
                ll_rw_block(READ_META, 1, &first_bh);

        dblock++;
        extlen--;

        while (extlen) {
                bh = getbuf(gl, dblock, CREATE);

                if (!buffer_uptodate(bh) && !buffer_locked(bh))
                        ll_rw_block(READA, 1, &bh);
                brelse(bh);
                dblock++;
                extlen--;

                if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
                        goto out;
        }

        wait_on_buffer(first_bh);
out:
        return first_bh;
}
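
/*
 * Worked example of the readahead clamp above (tunable value assumed purely
 * for illustration): if gt_max_readahead were 262144 bytes on a filesystem
 * with 4096-byte blocks (sb_bsize_shift == 12), max_ra would be
 * 262144 >> 12 == 64, so a 200-block extent would only have its first 64
 * blocks submitted, and only the very first block is actually waited on
 * before returning.
 */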