meta_io.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "ops_address.h"
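
/*
 * A metadata buffer is "busy" while it is dirty, locked for I/O, or pinned
 * in the journal; "in io" covers only the dirty and locked states.
 */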
#define buffer_busy(bh) \
((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned)))
#define buffer_in_io(bh) \
((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock)))
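
/*
 * Metadata buffers are mapped block-by-block in getbuf(), so the address
 * space should never need a get_block callback; this stub only warns if
 * writepage ever reaches it.
 */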
static int aspace_get_block(struct inode *inode, sector_t lblock,
                            struct buffer_head *bh_result, int create)
{
        gfs2_assert_warn(inode->i_sb->s_fs_info, 0);
        return -EOPNOTSUPP;
}

static int gfs2_aspace_writepage(struct page *page,
                                 struct writeback_control *wbc)
{
        return block_write_full_page(page, aspace_get_block, wbc);
}

static const struct address_space_operations aspace_aops = {
        .writepage = gfs2_aspace_writepage,
        .releasepage = gfs2_releasepage,
};

/**
 * gfs2_aspace_get - Create and initialize a struct inode structure
 * @sdp: the filesystem the aspace is in
 *
 * Right now a struct inode is just a struct inode. Maybe Linux
 * will supply a more lightweight address space construct (that works)
 * in the future.
 *
 * Make sure pages/buffers in this aspace aren't in high memory.
 *
 * Returns: the aspace
 */

struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
{
        struct inode *aspace;

        aspace = new_inode(sdp->sd_vfs);
        if (aspace) {
                mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
                aspace->i_mapping->a_ops = &aspace_aops;
                aspace->i_size = ~0ULL;
                aspace->i_private = NULL;
                insert_inode_hash(aspace);
        }
        return aspace;
}

void gfs2_aspace_put(struct inode *aspace)
{
        remove_inode_hash(aspace);
        iput(aspace);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;
        int retry;

        BUG_ON(!spin_is_locked(&sdp->sd_log_lock));
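
        /*
         * The log lock has to be dropped while write-back is submitted, so
         * each submission restarts the scan of the ail1 list from its tail.
         */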
        do {
                retry = 0;

                list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
                                                 bd_ail_st_list) {
                        bh = bd->bd_bh;

                        gfs2_assert(sdp, bd->bd_ail == ai);

                        if (!buffer_busy(bh)) {
                                if (!buffer_uptodate(bh)) {
                                        gfs2_log_unlock(sdp);
                                        gfs2_io_error_bh(sdp, bh);
                                        gfs2_log_lock(sdp);
                                }
                                list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
                                continue;
                        }

                        if (!buffer_dirty(bh))
                                continue;

                        list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);

                        gfs2_log_unlock(sdp);
                        wait_on_buffer(bh);
                        ll_rw_block(WRITE, 1, &bh);
                        gfs2_log_lock(sdp);

                        retry = 1;
                        break;
                }
        } while (retry);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int flags)
{
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

        list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
                                         bd_ail_st_list) {
                bh = bd->bd_bh;

                gfs2_assert(sdp, bd->bd_ail == ai);

                if (buffer_busy(bh)) {
                        if (flags & DIO_ALL)
                                continue;
                        else
                                break;
                }

                if (!buffer_uptodate(bh))
                        gfs2_io_error_bh(sdp, bh);

                list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
        }

        return list_empty(&ai->ai_ail1_list);
}

/**
 * gfs2_ail2_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &ai->ai_ail2_list;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->prev, struct gfs2_bufdata,
                                bd_ail_st_list);
                gfs2_assert(sdp, bd->bd_ail == ai);
                bd->bd_ail = NULL;
                list_del(&bd->bd_ail_st_list);
                list_del(&bd->bd_ail_gl_list);
                atomic_dec(&bd->bd_gl->gl_ail_count);
                brelse(bd->bd_bh);
        }
}

/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        unsigned int blocks;
        struct list_head *head = &gl->gl_ail_list;
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;
        u64 blkno;
        int error;

        blocks = atomic_read(&gl->gl_ail_count);
        if (!blocks)
                return;
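
        /* Reserve enough journal space to revoke every block still on the AIL */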
        error = gfs2_trans_begin(sdp, 0, blocks);
        if (gfs2_assert_withdraw(sdp, !error))
                return;

        gfs2_log_lock(sdp);
        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata,
                                bd_ail_gl_list);
                bh = bd->bd_bh;
                blkno = bh->b_blocknr;
                gfs2_assert_withdraw(sdp, !buffer_busy(bh));

                bd->bd_ail = NULL;
                list_del(&bd->bd_ail_st_list);
                list_del(&bd->bd_ail_gl_list);
                atomic_dec(&gl->gl_ail_count);
                brelse(bh);
                gfs2_log_unlock(sdp);

                gfs2_trans_add_revoke(sdp, blkno);

                gfs2_log_lock(sdp);
        }
        gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
        gfs2_log_unlock(sdp);

        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL);
}

/**
 * gfs2_meta_inval - Invalidate all buffers associated with a glock
 * @gl: the glock
 *
 */

void gfs2_meta_inval(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;
        struct address_space *mapping = gl->gl_aspace->i_mapping;

        gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));

        atomic_inc(&aspace->i_writecount);
        truncate_inode_pages(mapping, 0);
        atomic_dec(&aspace->i_writecount);

        gfs2_assert_withdraw(sdp, !mapping->nrpages);
}

/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */

void gfs2_meta_sync(struct gfs2_glock *gl)
{
        struct address_space *mapping = gl->gl_aspace->i_mapping;
        int error;

        filemap_fdatawrite(mapping);
        error = filemap_fdatawait(mapping);

        if (error)
                gfs2_io_error(gl->gl_sbd);
}

/**
 * getbuf - Get a buffer with a given address space
 * @sdp: the filesystem
 * @aspace: the address space
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace,
                                  u64 blkno, int create)
{
        struct page *page;
        struct buffer_head *bh;
        unsigned int shift;
        unsigned long index;
        unsigned int bufnum;

        shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
        index = blkno >> shift;             /* convert block to page */
        bufnum = blkno - (index << shift);  /* block buf index within page */
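
        /* grab_cache_page() returns NULL if it cannot allocate the page,
           so keep retrying when a new buffer has to be created */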
        if (create) {
                for (;;) {
                        page = grab_cache_page(aspace->i_mapping, index);
                        if (page)
                                break;
                        yield();
                }
        } else {
                page = find_lock_page(aspace->i_mapping, index);
                if (!page)
                        return NULL;
        }

        if (!page_has_buffers(page))
                create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

        /* Locate header for our buffer within our page */
        for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
                /* Do nothing */;
        get_bh(bh);

        if (!buffer_mapped(bh))
                map_bh(bh, sdp->sd_vfs, blkno);

        unlock_page(page);
        mark_page_accessed(page);
        page_cache_release(page);

        return bh;
}

static void meta_prep_new(struct buffer_head *bh)
{
        struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

        lock_buffer(bh);
        clear_buffer_dirty(bh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
        struct buffer_head *bh;
        bh = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
        meta_prep_new(bh);
        return bh;
}

/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
                   struct buffer_head **bhp)
{
        *bhp = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
        if (!buffer_uptodate(*bhp))
                ll_rw_block(READ, 1, bhp);
        if (flags & DIO_WAIT) {
                int error = gfs2_meta_wait(gl->gl_sbd, *bhp);
                if (error) {
                        brelse(*bhp);
                        return error;
                }
        }

        return 0;
}

/**
 * gfs2_meta_wait - Wait for a previously submitted read of a block to complete
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        wait_on_buffer(bh);

        if (!buffer_uptodate(bh)) {
                struct gfs2_trans *tr = current->journal_info;
                if (tr && tr->tr_touched)
                        gfs2_io_error_bh(sdp, bh);
                return -EIO;
        }
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        return 0;
}

/**
 * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
 * @gl: the glock the buffer belongs to
 * @bh: The buffer to be attached to
 * @meta: Flag to indicate whether it is metadata or not
 */

void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
                         int meta)
{
        struct gfs2_bufdata *bd;

        if (meta)
                lock_page(bh->b_page);

        if (bh->b_private) {
                if (meta)
                        unlock_page(bh->b_page);
                return;
        }

        bd = kmem_cache_alloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
        memset(bd, 0, sizeof(struct gfs2_bufdata));
        bd->bd_bh = bh;
        bd->bd_gl = gl;

        INIT_LIST_HEAD(&bd->bd_list_tr);
        if (meta)
                lops_init_le(&bd->bd_le, &gfs2_buf_lops);
        else
                lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
        bh->b_private = bd;

        if (meta)
                unlock_page(bh->b_page);
}

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to be pinned
 *
 */

void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd = bh->b_private;

        gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));

        if (test_set_buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);

        wait_on_buffer(bh);

        /* If this buffer is in the AIL and it has already been written
           to in-place disk block, remove it from the AIL. */

        gfs2_log_lock(sdp);
        if (bd->bd_ail && !buffer_in_io(bh))
                list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
        gfs2_log_unlock(sdp);
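
        /* Clear the dirty bit so the VM won't write the buffer back to its
           in-place location while it is pinned in the journal */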
        clear_buffer_dirty(bh);
        wait_on_buffer(bh);

        if (!buffer_uptodate(bh))
                gfs2_io_error_bh(sdp, bh);

        get_bh(bh);
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: the AIL entry this buffer should be placed on
 *
 */

void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
                struct gfs2_ail *ai)
{
        struct gfs2_bufdata *bd = bh->b_private;

        gfs2_assert_withdraw(sdp, buffer_uptodate(bh));

        if (!buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);

        mark_buffer_dirty(bh);
        clear_buffer_pinned(bh);
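
        /* Attach the buffer to the AIL entry for this log flush; if it is
           not already tracked on an AIL, add it to its glock's AIL list too */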
        gfs2_log_lock(sdp);
        if (bd->bd_ail) {
                list_del(&bd->bd_ail_st_list);
                brelse(bh);
        } else {
                struct gfs2_glock *gl = bd->bd_gl;
                list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
                atomic_inc(&gl->gl_ail_count);
        }
        bd->bd_ail = ai;
        list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
        gfs2_log_unlock(sdp);
}

/**
 * gfs2_meta_wipe - make sure an inode's buffers are no longer dirty or pinned
 * @ip: the inode who owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */

void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct inode *aspace = ip->i_gl->gl_aspace;
        struct buffer_head *bh;

        while (blen) {
                bh = getbuf(sdp, aspace, bstart, NO_CREATE);
                if (bh) {
                        struct gfs2_bufdata *bd = bh->b_private;

                        if (test_clear_buffer_pinned(bh)) {
                                struct gfs2_trans *tr = current->journal_info;
                                gfs2_log_lock(sdp);
                                list_del_init(&bd->bd_le.le_list);
                                gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
                                sdp->sd_log_num_buf--;
                                gfs2_log_unlock(sdp);
                                tr->tr_num_buf_rm++;
                                brelse(bh);
                        }
                        if (bd) {
                                gfs2_log_lock(sdp);
                                if (bd->bd_ail) {
                                        u64 blkno = bh->b_blocknr;
                                        bd->bd_ail = NULL;
                                        list_del(&bd->bd_ail_st_list);
                                        list_del(&bd->bd_ail_gl_list);
                                        atomic_dec(&bd->bd_gl->gl_ail_count);
                                        brelse(bh);
                                        gfs2_log_unlock(sdp);
                                        gfs2_trans_add_revoke(sdp, blkno);
                                } else
                                        gfs2_log_unlock(sdp);
                        }
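
                        /* The blocks have been deallocated, so discard the
                           buffer's contents rather than letting stale
                           metadata reach the disk */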
                        lock_buffer(bh);
                        clear_buffer_dirty(bh);
                        clear_buffer_uptodate(bh);
                        unlock_buffer(bh);

                        brelse(bh);
                }

                bstart++;
                blen--;
        }
}

/**
 * gfs2_meta_cache_flush - get rid of any references on buffers for this inode
 * @ip: The GFS2 inode
 *
 * This releases buffers that are in the most-recently-used array of
 * blocks used for indirect block addressing for this inode.
 */

void gfs2_meta_cache_flush(struct gfs2_inode *ip)
{
        struct buffer_head **bh_slot;
        unsigned int x;

        spin_lock(&ip->i_spin);

        for (x = 0; x < GFS2_MAX_META_HEIGHT; x++) {
                bh_slot = &ip->i_cache[x];
                if (!*bh_slot)
                        break;
                brelse(*bh_slot);
                *bh_slot = NULL;
        }

        spin_unlock(&ip->i_spin);
}

/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @new: Non-zero if we may create a new buffer
 * @bhp: the buffer is returned here
 *
 * Try to use the gfs2_inode's MRU metadata tree cache.
 *
 * Returns: errno
 */

int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
                              int new, struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_glock *gl = ip->i_gl;
        struct buffer_head *bh = NULL, **bh_slot = ip->i_cache + height;
        int in_cache = 0;
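
        /* Check the per-height MRU slot before going to the page cache */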
        spin_lock(&ip->i_spin);
        if (*bh_slot && (*bh_slot)->b_blocknr == num) {
                bh = *bh_slot;
                get_bh(bh);
                in_cache = 1;
        }
        spin_unlock(&ip->i_spin);

        if (!bh)
                bh = getbuf(gl->gl_sbd, gl->gl_aspace, num, CREATE);

        if (!bh)
                return -ENOBUFS;

        if (new) {
                if (gfs2_assert_warn(sdp, height))
                        goto err;
                meta_prep_new(bh);
                gfs2_trans_add_bh(ip->i_gl, bh, 1);
                gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
                gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
        } else {
                u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
                if (!buffer_uptodate(bh)) {
                        ll_rw_block(READ, 1, &bh);
                        if (gfs2_meta_wait(sdp, bh))
                                goto err;
                }
                if (gfs2_metatype_check(sdp, bh, mtype))
                        goto err;
        }

        if (!in_cache) {
                spin_lock(&ip->i_spin);
                if (*bh_slot)
                        brelse(*bh_slot);
                *bh_slot = bh;
                get_bh(bh);
                spin_unlock(&ip->i_spin);
        }

        *bhp = bh;
        return 0;
err:
        brelse(bh);
        return -EIO;
}

/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;
        struct buffer_head *first_bh, *bh;
        u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
                     sdp->sd_sb.sb_bsize_shift;

        BUG_ON(!extlen);

        if (max_ra < 1)
                max_ra = 1;
        if (extlen > max_ra)
                extlen = max_ra;

        first_bh = getbuf(sdp, aspace, dblock, CREATE);

        if (buffer_uptodate(first_bh))
                goto out;
        if (!buffer_locked(first_bh))
                ll_rw_block(READ, 1, &first_bh);

        dblock++;
        extlen--;
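
        /* Issue readahead for the rest of the extent, but bail out as soon
           as the read of the first block has completed */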
        while (extlen) {
                bh = getbuf(sdp, aspace, dblock, CREATE);

                if (!buffer_uptodate(bh) && !buffer_locked(bh))
                        ll_rw_block(READA, 1, &bh);
                brelse(bh);
                dblock++;
                extlen--;
                if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
                        goto out;
        }

        wait_on_buffer(first_bh);
out:
        return first_bh;
}

/**
 * gfs2_meta_syncfs - sync all the buffers in a filesystem
 * @sdp: the filesystem
 *
 */

void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
        gfs2_log_flush(sdp, NULL);
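
        /* Keep starting AIL write-back until the ail1 lists have drained */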
        for (;;) {
                gfs2_ail1_start(sdp, DIO_ALL);
                if (gfs2_ail1_empty(sdp, DIO_ALL))
                        break;
                msleep(10);
        }
}