/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "ops_address.h"
static int aspace_get_block(struct inode *inode, sector_t lblock,
                            struct buffer_head *bh_result, int create)
{
        /* Metadata address spaces are never mapped through get_block(),
           so reaching this function indicates a bug. */
        gfs2_assert_warn(inode->i_sb->s_fs_info, 0);
        return -EOPNOTSUPP;
}

static int gfs2_aspace_writepage(struct page *page,
                                 struct writeback_control *wbc)
{
        return block_write_full_page(page, aspace_get_block, wbc);
}

static const struct address_space_operations aspace_aops = {
        .writepage = gfs2_aspace_writepage,
        .releasepage = gfs2_releasepage,
};
/**
 * gfs2_aspace_get - Create and initialize a struct inode structure
 * @sdp: the filesystem the aspace is in
 *
 * Right now a struct inode is just a struct inode.  Maybe Linux
 * will supply a more lightweight address space construct (that works)
 * in the future.
 *
 * Make sure pages/buffers in this aspace aren't in high memory.
 *
 * Returns: the aspace
 */

struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
{
        struct inode *aspace;

        aspace = new_inode(sdp->sd_vfs);
        if (aspace) {
                mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
                aspace->i_mapping->a_ops = &aspace_aops;
                aspace->i_size = ~0ULL;
                aspace->i_private = NULL;
                insert_inode_hash(aspace);
        }
        return aspace;
}
void gfs2_aspace_put(struct inode *aspace)
{
        remove_inode_hash(aspace);
        iput(aspace);
}
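
/*
 * Illustrative sketch (not part of this file): a caller that needs a
 * private metadata address space would pair the two helpers above when
 * setting up and tearing down the owning object, roughly:
 *
 *	struct inode *aspace = gfs2_aspace_get(sdp);
 *	if (!aspace)
 *		return -ENOMEM;
 *	...
 *	gfs2_aspace_put(aspace);
 *
 * The glock code is assumed to store the result in gl->gl_aspace; see
 * glock.c for the actual usage.
 */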
/**
 * gfs2_meta_inval - Invalidate all buffers associated with a glock
 * @gl: the glock
 *
 */

void gfs2_meta_inval(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;
        struct address_space *mapping = gl->gl_aspace->i_mapping;

        gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));

        atomic_inc(&aspace->i_writecount);
        truncate_inode_pages(mapping, 0);
        atomic_dec(&aspace->i_writecount);

        gfs2_assert_withdraw(sdp, !mapping->nrpages);
}
/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */

void gfs2_meta_sync(struct gfs2_glock *gl)
{
        struct address_space *mapping = gl->gl_aspace->i_mapping;
        int error;

        filemap_fdatawrite(mapping);
        error = filemap_fdatawait(mapping);

        if (error)
                gfs2_io_error(gl->gl_sbd);
}
/**
 * getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

static struct buffer_head *getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
        struct address_space *mapping = gl->gl_aspace->i_mapping;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct page *page;
        struct buffer_head *bh;
        unsigned int shift;
        unsigned long index;
        unsigned int bufnum;

        shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
        index = blkno >> shift;             /* convert block to page */
        bufnum = blkno - (index << shift);  /* block buf index within page */

        if (create) {
                for (;;) {
                        page = grab_cache_page(mapping, index);
                        if (page)
                                break;
                        yield();
                }
        } else {
                page = find_lock_page(mapping, index);
                if (!page)
                        return NULL;
        }

        if (!page_has_buffers(page))
                create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

        /* Locate header for our buffer within our page */
        for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
                /* Do nothing */;
        get_bh(bh);

        if (!buffer_mapped(bh))
                map_bh(bh, sdp->sd_vfs, blkno);

        unlock_page(page);
        mark_page_accessed(page);
        page_cache_release(page);

        return bh;
}
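
/*
 * Worked example for the index/bufnum arithmetic above (illustrative,
 * assuming 4096-byte pages and a 1024-byte filesystem block size):
 * PAGE_CACHE_SHIFT = 12 and sb_bsize_shift = 10, so shift = 2 and each
 * page covers four blocks.  For blkno = 11, index = 11 >> 2 = 2 (the
 * third page in the mapping) and bufnum = 11 - (2 << 2) = 3 (the fourth
 * buffer in that page's buffer ring).
 */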
static void meta_prep_new(struct buffer_head *bh)
{
        struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

        lock_buffer(bh);
        clear_buffer_dirty(bh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}
/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
        struct buffer_head *bh;
        bh = getbuf(gl, blkno, CREATE);
        meta_prep_new(bh);
        return bh;
}
/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
                   struct buffer_head **bhp)
{
        *bhp = getbuf(gl, blkno, CREATE);
        if (!buffer_uptodate(*bhp))
                ll_rw_block(READ_META, 1, bhp);
        if (flags & DIO_WAIT) {
                int error = gfs2_meta_wait(gl->gl_sbd, *bhp);
                if (error) {
                        brelse(*bhp);
                        return error;
                }
        }

        return 0;
}
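
/*
 * Illustrative sketch (not part of this file): a caller holding the
 * covering glock would typically read a metadata block synchronously
 * like this:
 *
 *	struct buffer_head *bh;
 *	int error = gfs2_meta_read(gl, blkno, DIO_WAIT, &bh);
 *	if (error)
 *		return error;
 *	... inspect bh->b_data ...
 *	brelse(bh);
 *
 * Without DIO_WAIT the read is only submitted, and the caller is
 * responsible for calling gfs2_meta_wait() before using the data.
 */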
/**
 * gfs2_meta_wait - Wait for a previously submitted read of a block to finish
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        wait_on_buffer(bh);

        if (!buffer_uptodate(bh)) {
                struct gfs2_trans *tr = current->journal_info;
                if (tr && tr->tr_touched)
                        gfs2_io_error_bh(sdp, bh);
                return -EIO;
        }
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        return 0;
}
/**
 * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
 * @gl: the glock the buffer belongs to
 * @bh: The buffer to be attached to
 * @meta: Flag to indicate whether it is metadata or not
 */

void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
                         int meta)
{
        struct gfs2_bufdata *bd;

        if (meta)
                lock_page(bh->b_page);

        if (bh->b_private) {
                if (meta)
                        unlock_page(bh->b_page);
                return;
        }

        bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
        bd->bd_bh = bh;
        bd->bd_gl = gl;

        INIT_LIST_HEAD(&bd->bd_list_tr);
        if (meta)
                lops_init_le(&bd->bd_le, &gfs2_buf_lops);
        else
                lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
        bh->b_private = bd;

        if (meta)
                unlock_page(bh->b_page);
}
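
/*
 * Illustrative note: callers are not expected to invoke
 * gfs2_attach_bufdata() directly; it is assumed to be reached through
 * gfs2_trans_add_bh() (see trans.c), which attaches a bufdata the first
 * time a buffer is added to a transaction, roughly:
 *
 *	if (!bh->b_private)
 *		gfs2_attach_bufdata(gl, bh, meta);
 *	bd = bh->b_private;
 */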
void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr,
                              int meta)
{
        struct gfs2_sbd *sdp = GFS2_SB(bh->b_page->mapping->host);
        struct gfs2_bufdata *bd = bh->b_private;

        if (test_clear_buffer_pinned(bh)) {
                list_del_init(&bd->bd_le.le_list);
                if (meta) {
                        gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
                        sdp->sd_log_num_buf--;
                        tr->tr_num_buf_rm++;
                } else {
                        gfs2_assert_warn(sdp, sdp->sd_log_num_databuf);
                        sdp->sd_log_num_databuf--;
                        tr->tr_num_databuf_rm++;
                }
                tr->tr_touched = 1;
                brelse(bh);
        }
        if (bd) {
                if (bd->bd_ail) {
                        gfs2_remove_from_ail(NULL, bd);
                        bh->b_private = NULL;
                        bd->bd_bh = NULL;
                        bd->bd_blkno = bh->b_blocknr;
                        gfs2_trans_add_revoke(sdp, bd);
                }
        }
        clear_buffer_dirty(bh);
        clear_buffer_uptodate(bh);
}
/**
 * gfs2_meta_wipe - make sure a run of an inode's buffers is no longer dirty or pinned
 * @ip: the inode who owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */

void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *bh;

        while (blen) {
                bh = getbuf(ip->i_gl, bstart, NO_CREATE);
                if (bh) {
                        lock_buffer(bh);
                        gfs2_log_lock(sdp);
                        gfs2_remove_from_journal(bh, current->journal_info, 1);
                        gfs2_log_unlock(sdp);
                        unlock_buffer(bh);
                        brelse(bh);
                }

                bstart++;
                blen--;
        }
}
/**
 * gfs2_meta_cache_flush - get rid of any references on buffers for this inode
 * @ip: The GFS2 inode
 *
 * This releases buffers that are in the most-recently-used array of
 * blocks used for indirect block addressing for this inode.
 */

void gfs2_meta_cache_flush(struct gfs2_inode *ip)
{
        struct buffer_head **bh_slot;
        unsigned int x;

        spin_lock(&ip->i_spin);

        for (x = 0; x < GFS2_MAX_META_HEIGHT; x++) {
                bh_slot = &ip->i_cache[x];
                if (*bh_slot) {
                        brelse(*bh_slot);
                        *bh_slot = NULL;
                }
        }

        spin_unlock(&ip->i_spin);
}
/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @new: Non-zero if we may create a new buffer
 * @bhp: the buffer is returned here
 *
 * Try to use the gfs2_inode's MRU metadata tree cache.
 *
 * Returns: errno
 */

int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
                              int new, struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_glock *gl = ip->i_gl;
        struct buffer_head *bh = NULL, **bh_slot = ip->i_cache + height;
        int in_cache = 0;

        BUG_ON(!gl);
        BUG_ON(!sdp);

        spin_lock(&ip->i_spin);

        if (*bh_slot && (*bh_slot)->b_blocknr == num) {
                bh = *bh_slot;
                get_bh(bh);
                in_cache = 1;
        }

        spin_unlock(&ip->i_spin);

        if (!bh)
                bh = getbuf(gl, num, CREATE);

        if (!bh)
                return -ENOBUFS;

        if (new) {
                if (gfs2_assert_warn(sdp, height))
                        goto err;
                meta_prep_new(bh);
                gfs2_trans_add_bh(ip->i_gl, bh, 1);
                gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
                gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
        } else {
                u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
                if (!buffer_uptodate(bh)) {
                        ll_rw_block(READ_META, 1, &bh);
                        if (gfs2_meta_wait(sdp, bh))
                                goto err;
                }
                if (gfs2_metatype_check(sdp, bh, mtype))
                        goto err;
        }

        if (!in_cache) {
                spin_lock(&ip->i_spin);
                if (*bh_slot)
                        brelse(*bh_slot);
                *bh_slot = bh;
                get_bh(bh);
                spin_unlock(&ip->i_spin);
        }

        *bhp = bh;
        return 0;
err:
        brelse(bh);
        return -EIO;
}
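
/*
 * Illustrative sketch (not part of this file): the block mapping code is
 * assumed to walk the metadata tree one height at a time, fetching each
 * indirect block through the per-inode MRU cache, roughly:
 *
 *	for (x = 1; x <= height; x++) {
 *		error = gfs2_meta_indirect_buffer(ip, x, dblock, 0, &bh);
 *		if (error)
 *			return error;
 *		dblock = (next block number decoded from bh->b_data);
 *		brelse(bh);
 *	}
 *
 * Repeated lookups at the same height hit *bh_slot and avoid getbuf().
 */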
/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * Returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct buffer_head *first_bh, *bh;
        u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
                     sdp->sd_sb.sb_bsize_shift;

        BUG_ON(!extlen);

        if (max_ra < 1)
                max_ra = 1;
        if (extlen > max_ra)
                extlen = max_ra;

        first_bh = getbuf(gl, dblock, CREATE);

        if (buffer_uptodate(first_bh))
                goto out;
        if (!buffer_locked(first_bh))
                ll_rw_block(READ_META, 1, &first_bh);

        dblock++;
        extlen--;

        while (extlen) {
                bh = getbuf(gl, dblock, CREATE);

                if (!buffer_uptodate(bh) && !buffer_locked(bh))
                        ll_rw_block(READA, 1, &bh);
                brelse(bh);
                dblock++;
                extlen--;

                if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
                        goto out;
        }

        wait_on_buffer(first_bh);
out:
        return first_bh;
}
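
/*
 * Illustrative sketch (not part of this file): a caller about to walk a
 * contiguous run of metadata blocks, such as the directory read path, is
 * assumed to use gfs2_meta_ra() to get the first block while priming the
 * page cache for the rest of the extent, roughly:
 *
 *	bh = gfs2_meta_ra(gl, dblock, extlen);
 *	if (!buffer_uptodate(bh)) {
 *		brelse(bh);
 *		return -EIO;
 *	}
 *	... use bh->b_data, then brelse(bh) ...
 *
 * The trailing blocks are submitted with READA only, so subsequent
 * getbuf()/gfs2_meta_read() calls are likely to find them up to date.
 */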