/* meta_io.c — GFS2 metadata buffer I/O */
  1. /*
  2. * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
  3. * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
  4. *
  5. * This copyrighted material is made available to anyone wishing to use,
  6. * modify, copy, or redistribute it subject to the terms and conditions
  7. * of the GNU General Public License version 2.
  8. */
  9. #include <linux/sched.h>
  10. #include <linux/slab.h>
  11. #include <linux/spinlock.h>
  12. #include <linux/completion.h>
  13. #include <linux/buffer_head.h>
  14. #include <linux/mm.h>
  15. #include <linux/pagemap.h>
  16. #include <linux/writeback.h>
  17. #include <linux/swap.h>
  18. #include <linux/delay.h>
  19. #include <linux/bio.h>
  20. #include <linux/gfs2_ondisk.h>
  21. #include <linux/lm_interface.h>
  22. #include "gfs2.h"
  23. #include "incore.h"
  24. #include "glock.h"
  25. #include "glops.h"
  26. #include "inode.h"
  27. #include "log.h"
  28. #include "lops.h"
  29. #include "meta_io.h"
  30. #include "rgrp.h"
  31. #include "trans.h"
  32. #include "util.h"
  33. #include "ops_address.h"
  34. static int aspace_get_block(struct inode *inode, sector_t lblock,
  35. struct buffer_head *bh_result, int create)
  36. {
  37. gfs2_assert_warn(inode->i_sb->s_fs_info, 0);
  38. return -EOPNOTSUPP;
  39. }
  40. static int gfs2_aspace_writepage(struct page *page,
  41. struct writeback_control *wbc)
  42. {
  43. return block_write_full_page(page, aspace_get_block, wbc);
  44. }
  45. static const struct address_space_operations aspace_aops = {
  46. .writepage = gfs2_aspace_writepage,
  47. .releasepage = gfs2_releasepage,
  48. };
  49. /**
  50. * gfs2_aspace_get - Create and initialize a struct inode structure
  51. * @sdp: the filesystem the aspace is in
  52. *
  53. * Right now a struct inode is just a struct inode. Maybe Linux
  54. * will supply a more lightweight address space construct (that works)
  55. * in the future.
  56. *
  57. * Make sure pages/buffers in this aspace aren't in high memory.
  58. *
  59. * Returns: the aspace
  60. */
  61. struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
  62. {
  63. struct inode *aspace;
  64. aspace = new_inode(sdp->sd_vfs);
  65. if (aspace) {
  66. mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
  67. aspace->i_mapping->a_ops = &aspace_aops;
  68. aspace->i_size = ~0ULL;
  69. aspace->i_private = NULL;
  70. insert_inode_hash(aspace);
  71. }
  72. return aspace;
  73. }
  74. void gfs2_aspace_put(struct inode *aspace)
  75. {
  76. remove_inode_hash(aspace);
  77. iput(aspace);
  78. }
  79. /**
  80. * gfs2_meta_inval - Invalidate all buffers associated with a glock
  81. * @gl: the glock
  82. *
  83. */
  84. void gfs2_meta_inval(struct gfs2_glock *gl)
  85. {
  86. struct gfs2_sbd *sdp = gl->gl_sbd;
  87. struct inode *aspace = gl->gl_aspace;
  88. struct address_space *mapping = gl->gl_aspace->i_mapping;
  89. gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
  90. atomic_inc(&aspace->i_writecount);
  91. truncate_inode_pages(mapping, 0);
  92. atomic_dec(&aspace->i_writecount);
  93. gfs2_assert_withdraw(sdp, !mapping->nrpages);
  94. }
  95. /**
  96. * gfs2_meta_sync - Sync all buffers associated with a glock
  97. * @gl: The glock
  98. *
  99. */
  100. void gfs2_meta_sync(struct gfs2_glock *gl)
  101. {
  102. struct address_space *mapping = gl->gl_aspace->i_mapping;
  103. int error;
  104. filemap_fdatawrite(mapping);
  105. error = filemap_fdatawait(mapping);
  106. if (error)
  107. gfs2_io_error(gl->gl_sbd);
  108. }
/**
 * getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Looks up (or creates) the page-cache page covering @blkno in the
 * glock's aspace, walks to the buffer_head for that block within the
 * page, and returns it with an extra reference held.
 *
 * Returns: the buffer (with a reference), or NULL if @create is 0 and
 *          the page is not present
 */
static struct buffer_head *getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gl->gl_aspace->i_mapping;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	/* fs blocks per page; assumes block size <= page size */
	shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;              /* convert block to page */
	bufnum = blkno - (index << shift);   /* block buf index within page */

	if (create) {
		/* grab_cache_page can fail under memory pressure; we must
		   not fail here, so yield and retry until it succeeds */
		for (;;) {
			page = grab_cache_page(mapping, index);
			if (page)
				break;
			yield();
		}
	} else {
		page = find_lock_page(mapping, index);
		if (!page)
			return NULL;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	/* Take the buffer reference while the page is still locked */
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

	unlock_page(page);
	mark_page_accessed(page);
	page_cache_release(page);

	return bh;
}
  154. static void meta_prep_new(struct buffer_head *bh)
  155. {
  156. struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
  157. lock_buffer(bh);
  158. clear_buffer_dirty(bh);
  159. set_buffer_uptodate(bh);
  160. unlock_buffer(bh);
  161. mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
  162. }
  163. /**
  164. * gfs2_meta_new - Get a block
  165. * @gl: The glock associated with this block
  166. * @blkno: The block number
  167. *
  168. * Returns: The buffer
  169. */
  170. struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
  171. {
  172. struct buffer_head *bh;
  173. bh = getbuf(gl, blkno, CREATE);
  174. meta_prep_new(bh);
  175. return bh;
  176. }
  177. /**
  178. * gfs2_meta_read - Read a block from disk
  179. * @gl: The glock covering the block
  180. * @blkno: The block number
  181. * @flags: flags
  182. * @bhp: the place where the buffer is returned (NULL on failure)
  183. *
  184. * Returns: errno
  185. */
  186. int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
  187. struct buffer_head **bhp)
  188. {
  189. *bhp = getbuf(gl, blkno, CREATE);
  190. if (!buffer_uptodate(*bhp))
  191. ll_rw_block(READ_META, 1, bhp);
  192. if (flags & DIO_WAIT) {
  193. int error = gfs2_meta_wait(gl->gl_sbd, *bhp);
  194. if (error) {
  195. brelse(*bhp);
  196. return error;
  197. }
  198. }
  199. return 0;
  200. }
  201. /**
  202. * gfs2_meta_wait - Reread a block from disk
  203. * @sdp: the filesystem
  204. * @bh: The block to wait for
  205. *
  206. * Returns: errno
  207. */
  208. int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
  209. {
  210. if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
  211. return -EIO;
  212. wait_on_buffer(bh);
  213. if (!buffer_uptodate(bh)) {
  214. struct gfs2_trans *tr = current->journal_info;
  215. if (tr && tr->tr_touched)
  216. gfs2_io_error_bh(sdp, bh);
  217. return -EIO;
  218. }
  219. if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
  220. return -EIO;
  221. return 0;
  222. }
  223. /**
  224. * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
  225. * @gl: the glock the buffer belongs to
  226. * @bh: The buffer to be attached to
  227. * @meta: Flag to indicate whether its metadata or not
  228. */
  229. void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
  230. int meta)
  231. {
  232. struct gfs2_bufdata *bd;
  233. if (meta)
  234. lock_page(bh->b_page);
  235. if (bh->b_private) {
  236. if (meta)
  237. unlock_page(bh->b_page);
  238. return;
  239. }
  240. bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL),
  241. bd->bd_bh = bh;
  242. bd->bd_gl = gl;
  243. INIT_LIST_HEAD(&bd->bd_list_tr);
  244. if (meta)
  245. lops_init_le(&bd->bd_le, &gfs2_buf_lops);
  246. else
  247. lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
  248. bh->b_private = bd;
  249. if (meta)
  250. unlock_page(bh->b_page);
  251. }
/**
 * gfs2_meta_wipe - make inode's buffers so they aren't dirty/pinned anymore
 * @ip: the inode who owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 * For each block in [bstart, bstart + blen): if a buffer exists, unpin
 * it (removing it from the log's buffer list and adjusting transaction
 * accounting), detach it from the AIL (issuing a revoke), and mark it
 * clean and not uptodate so it is effectively discarded.
 */
void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh;

	while (blen) {
		/* NO_CREATE: only act on buffers already in the cache */
		bh = getbuf(ip->i_gl, bstart, NO_CREATE);
		if (bh) {
			struct gfs2_bufdata *bd = bh->b_private;

			if (test_clear_buffer_pinned(bh)) {
				/* assumes a pinned buffer always has a
				   bufdata and a current transaction —
				   bd and tr are dereferenced without a
				   NULL check here (TODO confirm) */
				struct gfs2_trans *tr = current->journal_info;
				struct gfs2_inode *bh_ip =
					GFS2_I(bh->b_page->mapping->host);

				gfs2_log_lock(sdp);
				list_del_init(&bd->bd_le.le_list);
				gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
				sdp->sd_log_num_buf--;
				gfs2_log_unlock(sdp);

				/* i_private != NULL appears to mark a
				   journaled-data inode; verify against
				   the rest of the fs */
				if (bh_ip->i_inode.i_private != NULL)
					tr->tr_num_databuf_rm++;
				else
					tr->tr_num_buf_rm++;
				/* drop the pin's reference */
				brelse(bh);
			}
			if (bd) {
				gfs2_log_lock(sdp);
				if (bd->bd_ail) {
					u64 blkno = bh->b_blocknr;
					bd->bd_ail = NULL;
					list_del(&bd->bd_ail_st_list);
					list_del(&bd->bd_ail_gl_list);
					atomic_dec(&bd->bd_gl->gl_ail_count);
					/* drop the AIL's reference */
					brelse(bh);
					gfs2_log_unlock(sdp);
					/* revoke outside the log lock */
					gfs2_trans_add_revoke(sdp, blkno);
				} else
					gfs2_log_unlock(sdp);
			}

			/* discard the buffer's contents */
			lock_buffer(bh);
			clear_buffer_dirty(bh);
			clear_buffer_uptodate(bh);
			unlock_buffer(bh);

			/* drop getbuf()'s reference */
			brelse(bh);
		}

		bstart++;
		blen--;
	}
}
  306. /**
  307. * gfs2_meta_cache_flush - get rid of any references on buffers for this inode
  308. * @ip: The GFS2 inode
  309. *
  310. * This releases buffers that are in the most-recently-used array of
  311. * blocks used for indirect block addressing for this inode.
  312. */
  313. void gfs2_meta_cache_flush(struct gfs2_inode *ip)
  314. {
  315. struct buffer_head **bh_slot;
  316. unsigned int x;
  317. spin_lock(&ip->i_spin);
  318. for (x = 0; x < GFS2_MAX_META_HEIGHT; x++) {
  319. bh_slot = &ip->i_cache[x];
  320. if (!*bh_slot)
  321. break;
  322. brelse(*bh_slot);
  323. *bh_slot = NULL;
  324. }
  325. spin_unlock(&ip->i_spin);
  326. }
/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @new: Non-zero if we may create a new buffer
 * @bhp: the buffer is returned here
 *
 * Try to use the gfs2_inode's MRU metadata tree cache (one cached
 * buffer per tree height).  On a miss the buffer is fetched via
 * getbuf() and, once valid, installed in the cache slot.  For a new
 * buffer the block is initialized as an indirect block and added to
 * the current transaction; otherwise it is read and its metatype
 * verified.
 *
 * Returns: errno
 */
int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
			      int new, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh = NULL, **bh_slot = ip->i_cache + height;
	int in_cache = 0;

	BUG_ON(!gl);
	BUG_ON(!sdp);

	/* Fast path: reuse the cached buffer for this height if it
	   covers the requested block; take a reference under i_spin */
	spin_lock(&ip->i_spin);
	if (*bh_slot && (*bh_slot)->b_blocknr == num) {
		bh = *bh_slot;
		get_bh(bh);
		in_cache = 1;
	}
	spin_unlock(&ip->i_spin);

	if (!bh)
		bh = getbuf(gl, num, CREATE);

	if (!bh)
		return -ENOBUFS;

	if (new) {
		/* New blocks are always indirect blocks (height != 0) */
		if (gfs2_assert_warn(sdp, height))
			goto err;
		meta_prep_new(bh);
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
	} else {
		/* Height 0 is the dinode itself; above that, indirect */
		u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
		if (!buffer_uptodate(bh)) {
			ll_rw_block(READ_META, 1, &bh);
			if (gfs2_meta_wait(sdp, bh))
				goto err;
		}
		if (gfs2_metatype_check(sdp, bh, mtype))
			goto err;
	}

	/* Install in the MRU slot, displacing any previous occupant;
	   the slot holds its own reference (get_bh) */
	if (!in_cache) {
		spin_lock(&ip->i_spin);
		if (*bh_slot)
			brelse(*bh_slot);
		*bh_slot = bh;
		get_bh(bh);
		spin_unlock(&ip->i_spin);
	}

	*bhp = bh;
	return 0;
err:
	brelse(bh);
	return -EIO;
}
  390. /**
  391. * gfs2_meta_ra - start readahead on an extent of a file
  392. * @gl: the glock the blocks belong to
  393. * @dblock: the starting disk block
  394. * @extlen: the number of blocks in the extent
  395. *
  396. * returns: the first buffer in the extent
  397. */
  398. struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
  399. {
  400. struct gfs2_sbd *sdp = gl->gl_sbd;
  401. struct buffer_head *first_bh, *bh;
  402. u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
  403. sdp->sd_sb.sb_bsize_shift;
  404. BUG_ON(!extlen);
  405. if (max_ra < 1)
  406. max_ra = 1;
  407. if (extlen > max_ra)
  408. extlen = max_ra;
  409. first_bh = getbuf(gl, dblock, CREATE);
  410. if (buffer_uptodate(first_bh))
  411. goto out;
  412. if (!buffer_locked(first_bh))
  413. ll_rw_block(READ_META, 1, &first_bh);
  414. dblock++;
  415. extlen--;
  416. while (extlen) {
  417. bh = getbuf(gl, dblock, CREATE);
  418. if (!buffer_uptodate(bh) && !buffer_locked(bh))
  419. ll_rw_block(READA, 1, &bh);
  420. brelse(bh);
  421. dblock++;
  422. extlen--;
  423. if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
  424. goto out;
  425. }
  426. wait_on_buffer(first_bh);
  427. out:
  428. return first_bh;
  429. }