/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
  9. #include <linux/sched.h>
  10. #include <linux/slab.h>
  11. #include <linux/spinlock.h>
  12. #include <linux/completion.h>
  13. #include <linux/buffer_head.h>
  14. #include <linux/mm.h>
  15. #include <linux/pagemap.h>
  16. #include <linux/writeback.h>
  17. #include <linux/swap.h>
  18. #include <linux/delay.h>
  19. #include <linux/gfs2_ondisk.h>
  20. #include "gfs2.h"
  21. #include "lm_interface.h"
  22. #include "incore.h"
  23. #include "glock.h"
  24. #include "glops.h"
  25. #include "inode.h"
  26. #include "log.h"
  27. #include "lops.h"
  28. #include "meta_io.h"
  29. #include "rgrp.h"
  30. #include "trans.h"
  31. #include "util.h"
  32. #include "ops_address.h"
  33. #define buffer_busy(bh) \
  34. ((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned)))
  35. #define buffer_in_io(bh) \
  36. ((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock)))
  37. static int aspace_get_block(struct inode *inode, sector_t lblock,
  38. struct buffer_head *bh_result, int create)
  39. {
  40. gfs2_assert_warn(inode->i_sb->s_fs_info, 0);
  41. return -EOPNOTSUPP;
  42. }
  43. static int gfs2_aspace_writepage(struct page *page,
  44. struct writeback_control *wbc)
  45. {
  46. return block_write_full_page(page, aspace_get_block, wbc);
  47. }
  48. static const struct address_space_operations aspace_aops = {
  49. .writepage = gfs2_aspace_writepage,
  50. .releasepage = gfs2_releasepage,
  51. };
  52. /**
  53. * gfs2_aspace_get - Create and initialize a struct inode structure
  54. * @sdp: the filesystem the aspace is in
  55. *
  56. * Right now a struct inode is just a struct inode. Maybe Linux
  57. * will supply a more lightweight address space construct (that works)
  58. * in the future.
  59. *
  60. * Make sure pages/buffers in this aspace aren't in high memory.
  61. *
  62. * Returns: the aspace
  63. */
  64. struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
  65. {
  66. struct inode *aspace;
  67. aspace = new_inode(sdp->sd_vfs);
  68. if (aspace) {
  69. mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
  70. aspace->i_mapping->a_ops = &aspace_aops;
  71. aspace->i_size = ~0ULL;
  72. aspace->u.generic_ip = NULL;
  73. insert_inode_hash(aspace);
  74. }
  75. return aspace;
  76. }
  77. void gfs2_aspace_put(struct inode *aspace)
  78. {
  79. remove_inode_hash(aspace);
  80. iput(aspace);
  81. }
  82. /**
  83. * gfs2_ail1_start_one - Start I/O on a part of the AIL
  84. * @sdp: the filesystem
  85. * @tr: the part of the AIL
  86. *
  87. */
  88. void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
  89. {
  90. struct gfs2_bufdata *bd, *s;
  91. struct buffer_head *bh;
  92. int retry;
  93. BUG_ON(!spin_is_locked(&sdp->sd_log_lock));
  94. do {
  95. retry = 0;
  96. list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
  97. bd_ail_st_list) {
  98. bh = bd->bd_bh;
  99. gfs2_assert(sdp, bd->bd_ail == ai);
  100. if (!buffer_busy(bh)) {
  101. if (!buffer_uptodate(bh)) {
  102. gfs2_log_unlock(sdp);
  103. gfs2_io_error_bh(sdp, bh);
  104. gfs2_log_lock(sdp);
  105. }
  106. list_move(&bd->bd_ail_st_list,
  107. &ai->ai_ail2_list);
  108. continue;
  109. }
  110. if (!buffer_dirty(bh))
  111. continue;
  112. list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);
  113. gfs2_log_unlock(sdp);
  114. wait_on_buffer(bh);
  115. ll_rw_block(WRITE, 1, &bh);
  116. gfs2_log_lock(sdp);
  117. retry = 1;
  118. break;
  119. }
  120. } while (retry);
  121. }
  122. /**
  123. * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
  124. * @sdp: the filesystem
  125. * @ai: the AIL entry
  126. *
  127. */
  128. int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int flags)
  129. {
  130. struct gfs2_bufdata *bd, *s;
  131. struct buffer_head *bh;
  132. list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
  133. bd_ail_st_list) {
  134. bh = bd->bd_bh;
  135. gfs2_assert(sdp, bd->bd_ail == ai);
  136. if (buffer_busy(bh)) {
  137. if (flags & DIO_ALL)
  138. continue;
  139. else
  140. break;
  141. }
  142. if (!buffer_uptodate(bh))
  143. gfs2_io_error_bh(sdp, bh);
  144. list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
  145. }
  146. return list_empty(&ai->ai_ail1_list);
  147. }
  148. /**
  149. * gfs2_ail2_empty_one - Check whether or not a trans in the AIL has been synced
  150. * @sdp: the filesystem
  151. * @ai: the AIL entry
  152. *
  153. */
  154. void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
  155. {
  156. struct list_head *head = &ai->ai_ail2_list;
  157. struct gfs2_bufdata *bd;
  158. while (!list_empty(head)) {
  159. bd = list_entry(head->prev, struct gfs2_bufdata,
  160. bd_ail_st_list);
  161. gfs2_assert(sdp, bd->bd_ail == ai);
  162. bd->bd_ail = NULL;
  163. list_del(&bd->bd_ail_st_list);
  164. list_del(&bd->bd_ail_gl_list);
  165. atomic_dec(&bd->bd_gl->gl_ail_count);
  166. brelse(bd->bd_bh);
  167. }
  168. }
  169. /**
  170. * ail_empty_gl - remove all buffers for a given lock from the AIL
  171. * @gl: the glock
  172. *
  173. * None of the buffers should be dirty, locked, or pinned.
  174. */
  175. void gfs2_ail_empty_gl(struct gfs2_glock *gl)
  176. {
  177. struct gfs2_sbd *sdp = gl->gl_sbd;
  178. unsigned int blocks;
  179. struct list_head *head = &gl->gl_ail_list;
  180. struct gfs2_bufdata *bd;
  181. struct buffer_head *bh;
  182. u64 blkno;
  183. int error;
  184. blocks = atomic_read(&gl->gl_ail_count);
  185. if (!blocks)
  186. return;
  187. error = gfs2_trans_begin(sdp, 0, blocks);
  188. if (gfs2_assert_withdraw(sdp, !error))
  189. return;
  190. gfs2_log_lock(sdp);
  191. while (!list_empty(head)) {
  192. bd = list_entry(head->next, struct gfs2_bufdata,
  193. bd_ail_gl_list);
  194. bh = bd->bd_bh;
  195. blkno = bh->b_blocknr;
  196. gfs2_assert_withdraw(sdp, !buffer_busy(bh));
  197. bd->bd_ail = NULL;
  198. list_del(&bd->bd_ail_st_list);
  199. list_del(&bd->bd_ail_gl_list);
  200. atomic_dec(&gl->gl_ail_count);
  201. brelse(bh);
  202. gfs2_log_unlock(sdp);
  203. gfs2_trans_add_revoke(sdp, blkno);
  204. gfs2_log_lock(sdp);
  205. }
  206. gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
  207. gfs2_log_unlock(sdp);
  208. gfs2_trans_end(sdp);
  209. gfs2_log_flush(sdp, NULL);
  210. }
  211. /**
  212. * gfs2_meta_inval - Invalidate all buffers associated with a glock
  213. * @gl: the glock
  214. *
  215. */
  216. void gfs2_meta_inval(struct gfs2_glock *gl)
  217. {
  218. struct gfs2_sbd *sdp = gl->gl_sbd;
  219. struct inode *aspace = gl->gl_aspace;
  220. struct address_space *mapping = gl->gl_aspace->i_mapping;
  221. gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
  222. atomic_inc(&aspace->i_writecount);
  223. truncate_inode_pages(mapping, 0);
  224. atomic_dec(&aspace->i_writecount);
  225. gfs2_assert_withdraw(sdp, !mapping->nrpages);
  226. }
  227. /**
  228. * gfs2_meta_sync - Sync all buffers associated with a glock
  229. * @gl: The glock
  230. * @flags: DIO_START | DIO_WAIT
  231. *
  232. */
  233. void gfs2_meta_sync(struct gfs2_glock *gl, int flags)
  234. {
  235. struct address_space *mapping = gl->gl_aspace->i_mapping;
  236. int error = 0;
  237. if (flags & DIO_START)
  238. filemap_fdatawrite(mapping);
  239. if (!error && (flags & DIO_WAIT))
  240. error = filemap_fdatawait(mapping);
  241. if (error)
  242. gfs2_io_error(gl->gl_sbd);
  243. }
  244. /**
  245. * getbuf - Get a buffer with a given address space
  246. * @sdp: the filesystem
  247. * @aspace: the address space
  248. * @blkno: the block number (filesystem scope)
  249. * @create: 1 if the buffer should be created
  250. *
  251. * Returns: the buffer
  252. */
  253. static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace,
  254. u64 blkno, int create)
  255. {
  256. struct page *page;
  257. struct buffer_head *bh;
  258. unsigned int shift;
  259. unsigned long index;
  260. unsigned int bufnum;
  261. shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
  262. index = blkno >> shift; /* convert block to page */
  263. bufnum = blkno - (index << shift); /* block buf index within page */
  264. if (create) {
  265. for (;;) {
  266. page = grab_cache_page(aspace->i_mapping, index);
  267. if (page)
  268. break;
  269. yield();
  270. }
  271. } else {
  272. page = find_lock_page(aspace->i_mapping, index);
  273. if (!page)
  274. return NULL;
  275. }
  276. if (!page_has_buffers(page))
  277. create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
  278. /* Locate header for our buffer within our page */
  279. for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
  280. /* Do nothing */;
  281. get_bh(bh);
  282. if (!buffer_mapped(bh))
  283. map_bh(bh, sdp->sd_vfs, blkno);
  284. unlock_page(page);
  285. mark_page_accessed(page);
  286. page_cache_release(page);
  287. return bh;
  288. }
  289. static void meta_prep_new(struct buffer_head *bh)
  290. {
  291. struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
  292. lock_buffer(bh);
  293. clear_buffer_dirty(bh);
  294. set_buffer_uptodate(bh);
  295. unlock_buffer(bh);
  296. mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
  297. }
  298. /**
  299. * gfs2_meta_new - Get a block
  300. * @gl: The glock associated with this block
  301. * @blkno: The block number
  302. *
  303. * Returns: The buffer
  304. */
  305. struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
  306. {
  307. struct buffer_head *bh;
  308. bh = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
  309. meta_prep_new(bh);
  310. return bh;
  311. }
  312. /**
  313. * gfs2_meta_read - Read a block from disk
  314. * @gl: The glock covering the block
  315. * @blkno: The block number
  316. * @flags: flags to gfs2_dreread()
  317. * @bhp: the place where the buffer is returned (NULL on failure)
  318. *
  319. * Returns: errno
  320. */
  321. int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
  322. struct buffer_head **bhp)
  323. {
  324. int error;
  325. *bhp = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
  326. error = gfs2_meta_reread(gl->gl_sbd, *bhp, flags);
  327. if (error)
  328. brelse(*bhp);
  329. return error;
  330. }
  331. /**
  332. * gfs2_meta_reread - Reread a block from disk
  333. * @sdp: the filesystem
  334. * @bh: The block to read
  335. * @flags: Flags that control the read
  336. *
  337. * Returns: errno
  338. */
  339. int gfs2_meta_reread(struct gfs2_sbd *sdp, struct buffer_head *bh, int flags)
  340. {
  341. if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
  342. return -EIO;
  343. if (flags & DIO_FORCE)
  344. clear_buffer_uptodate(bh);
  345. if ((flags & DIO_START) && !buffer_uptodate(bh))
  346. ll_rw_block(READ, 1, &bh);
  347. if (flags & DIO_WAIT) {
  348. wait_on_buffer(bh);
  349. if (!buffer_uptodate(bh)) {
  350. struct gfs2_trans *tr = current->journal_info;
  351. if (tr && tr->tr_touched)
  352. gfs2_io_error_bh(sdp, bh);
  353. return -EIO;
  354. }
  355. if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
  356. return -EIO;
  357. }
  358. return 0;
  359. }
  360. /**
  361. * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
  362. * @gl: the glock the buffer belongs to
  363. * @bh: The buffer to be attached to
  364. * @meta: Flag to indicate whether its metadata or not
  365. */
  366. void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
  367. int meta)
  368. {
  369. struct gfs2_bufdata *bd;
  370. if (meta)
  371. lock_page(bh->b_page);
  372. if (bh->b_private) {
  373. if (meta)
  374. unlock_page(bh->b_page);
  375. return;
  376. }
  377. bd = kmem_cache_alloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL),
  378. memset(bd, 0, sizeof(struct gfs2_bufdata));
  379. bd->bd_bh = bh;
  380. bd->bd_gl = gl;
  381. INIT_LIST_HEAD(&bd->bd_list_tr);
  382. if (meta)
  383. lops_init_le(&bd->bd_le, &gfs2_buf_lops);
  384. else
  385. lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
  386. bh->b_private = bd;
  387. if (meta)
  388. unlock_page(bh->b_page);
  389. }
  390. /**
  391. * gfs2_pin - Pin a buffer in memory
  392. * @sdp: the filesystem the buffer belongs to
  393. * @bh: The buffer to be pinned
  394. *
  395. */
  396. void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
  397. {
  398. struct gfs2_bufdata *bd = bh->b_private;
  399. gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
  400. if (test_set_buffer_pinned(bh))
  401. gfs2_assert_withdraw(sdp, 0);
  402. wait_on_buffer(bh);
  403. /* If this buffer is in the AIL and it has already been written
  404. to in-place disk block, remove it from the AIL. */
  405. gfs2_log_lock(sdp);
  406. if (bd->bd_ail && !buffer_in_io(bh))
  407. list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
  408. gfs2_log_unlock(sdp);
  409. clear_buffer_dirty(bh);
  410. wait_on_buffer(bh);
  411. if (!buffer_uptodate(bh))
  412. gfs2_io_error_bh(sdp, bh);
  413. get_bh(bh);
  414. }
  415. /**
  416. * gfs2_unpin - Unpin a buffer
  417. * @sdp: the filesystem the buffer belongs to
  418. * @bh: The buffer to unpin
  419. * @ai:
  420. *
  421. */
  422. void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
  423. struct gfs2_ail *ai)
  424. {
  425. struct gfs2_bufdata *bd = bh->b_private;
  426. gfs2_assert_withdraw(sdp, buffer_uptodate(bh));
  427. if (!buffer_pinned(bh))
  428. gfs2_assert_withdraw(sdp, 0);
  429. mark_buffer_dirty(bh);
  430. clear_buffer_pinned(bh);
  431. gfs2_log_lock(sdp);
  432. if (bd->bd_ail) {
  433. list_del(&bd->bd_ail_st_list);
  434. brelse(bh);
  435. } else {
  436. struct gfs2_glock *gl = bd->bd_gl;
  437. list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
  438. atomic_inc(&gl->gl_ail_count);
  439. }
  440. bd->bd_ail = ai;
  441. list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
  442. gfs2_log_unlock(sdp);
  443. }
  444. /**
  445. * gfs2_meta_wipe - make inode's buffers so they aren't dirty/pinned anymore
  446. * @ip: the inode who owns the buffers
  447. * @bstart: the first buffer in the run
  448. * @blen: the number of buffers in the run
  449. *
  450. */
  451. void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
  452. {
  453. struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
  454. struct inode *aspace = ip->i_gl->gl_aspace;
  455. struct buffer_head *bh;
  456. while (blen) {
  457. bh = getbuf(sdp, aspace, bstart, NO_CREATE);
  458. if (bh) {
  459. struct gfs2_bufdata *bd = bh->b_private;
  460. if (test_clear_buffer_pinned(bh)) {
  461. struct gfs2_trans *tr = current->journal_info;
  462. gfs2_log_lock(sdp);
  463. list_del_init(&bd->bd_le.le_list);
  464. gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
  465. sdp->sd_log_num_buf--;
  466. gfs2_log_unlock(sdp);
  467. tr->tr_num_buf_rm++;
  468. brelse(bh);
  469. }
  470. if (bd) {
  471. gfs2_log_lock(sdp);
  472. if (bd->bd_ail) {
  473. u64 blkno = bh->b_blocknr;
  474. bd->bd_ail = NULL;
  475. list_del(&bd->bd_ail_st_list);
  476. list_del(&bd->bd_ail_gl_list);
  477. atomic_dec(&bd->bd_gl->gl_ail_count);
  478. brelse(bh);
  479. gfs2_log_unlock(sdp);
  480. gfs2_trans_add_revoke(sdp, blkno);
  481. } else
  482. gfs2_log_unlock(sdp);
  483. }
  484. lock_buffer(bh);
  485. clear_buffer_dirty(bh);
  486. clear_buffer_uptodate(bh);
  487. unlock_buffer(bh);
  488. brelse(bh);
  489. }
  490. bstart++;
  491. blen--;
  492. }
  493. }
  494. /**
  495. * gfs2_meta_cache_flush - get rid of any references on buffers for this inode
  496. * @ip: The GFS2 inode
  497. *
  498. * This releases buffers that are in the most-recently-used array of
  499. * blocks used for indirect block addressing for this inode.
  500. */
  501. void gfs2_meta_cache_flush(struct gfs2_inode *ip)
  502. {
  503. struct buffer_head **bh_slot;
  504. unsigned int x;
  505. spin_lock(&ip->i_spin);
  506. for (x = 0; x < GFS2_MAX_META_HEIGHT; x++) {
  507. bh_slot = &ip->i_cache[x];
  508. if (!*bh_slot)
  509. break;
  510. brelse(*bh_slot);
  511. *bh_slot = NULL;
  512. }
  513. spin_unlock(&ip->i_spin);
  514. }
  515. /**
  516. * gfs2_meta_indirect_buffer - Get a metadata buffer
  517. * @ip: The GFS2 inode
  518. * @height: The level of this buf in the metadata (indir addr) tree (if any)
  519. * @num: The block number (device relative) of the buffer
  520. * @new: Non-zero if we may create a new buffer
  521. * @bhp: the buffer is returned here
  522. *
  523. * Try to use the gfs2_inode's MRU metadata tree cache.
  524. *
  525. * Returns: errno
  526. */
  527. int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
  528. int new, struct buffer_head **bhp)
  529. {
  530. struct buffer_head *bh, **bh_slot = ip->i_cache + height;
  531. int error;
  532. spin_lock(&ip->i_spin);
  533. bh = *bh_slot;
  534. if (bh) {
  535. if (bh->b_blocknr == num)
  536. get_bh(bh);
  537. else
  538. bh = NULL;
  539. }
  540. spin_unlock(&ip->i_spin);
  541. if (bh) {
  542. if (new)
  543. meta_prep_new(bh);
  544. else {
  545. error = gfs2_meta_reread(GFS2_SB(&ip->i_inode), bh,
  546. DIO_START | DIO_WAIT);
  547. if (error) {
  548. brelse(bh);
  549. return error;
  550. }
  551. }
  552. } else {
  553. if (new)
  554. bh = gfs2_meta_new(ip->i_gl, num);
  555. else {
  556. error = gfs2_meta_read(ip->i_gl, num,
  557. DIO_START | DIO_WAIT, &bh);
  558. if (error)
  559. return error;
  560. }
  561. spin_lock(&ip->i_spin);
  562. if (*bh_slot != bh) {
  563. brelse(*bh_slot);
  564. *bh_slot = bh;
  565. get_bh(bh);
  566. }
  567. spin_unlock(&ip->i_spin);
  568. }
  569. if (new) {
  570. if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), height)) {
  571. brelse(bh);
  572. return -EIO;
  573. }
  574. gfs2_trans_add_bh(ip->i_gl, bh, 1);
  575. gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
  576. gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
  577. } else if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh,
  578. (height) ? GFS2_METATYPE_IN : GFS2_METATYPE_DI)) {
  579. brelse(bh);
  580. return -EIO;
  581. }
  582. *bhp = bh;
  583. return 0;
  584. }
  585. /**
  586. * gfs2_meta_ra - start readahead on an extent of a file
  587. * @gl: the glock the blocks belong to
  588. * @dblock: the starting disk block
  589. * @extlen: the number of blocks in the extent
  590. *
  591. */
  592. void gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
  593. {
  594. struct gfs2_sbd *sdp = gl->gl_sbd;
  595. struct inode *aspace = gl->gl_aspace;
  596. struct buffer_head *first_bh, *bh;
  597. u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
  598. sdp->sd_sb.sb_bsize_shift;
  599. int error;
  600. if (!extlen || !max_ra)
  601. return;
  602. if (extlen > max_ra)
  603. extlen = max_ra;
  604. first_bh = getbuf(sdp, aspace, dblock, CREATE);
  605. if (buffer_uptodate(first_bh))
  606. goto out;
  607. if (!buffer_locked(first_bh)) {
  608. error = gfs2_meta_reread(sdp, first_bh, DIO_START);
  609. if (error)
  610. goto out;
  611. }
  612. dblock++;
  613. extlen--;
  614. while (extlen) {
  615. bh = getbuf(sdp, aspace, dblock, CREATE);
  616. if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
  617. error = gfs2_meta_reread(sdp, bh, DIO_START);
  618. brelse(bh);
  619. if (error)
  620. goto out;
  621. } else
  622. brelse(bh);
  623. dblock++;
  624. extlen--;
  625. if (buffer_uptodate(first_bh))
  626. break;
  627. }
  628. out:
  629. brelse(first_bh);
  630. }
  631. /**
  632. * gfs2_meta_syncfs - sync all the buffers in a filesystem
  633. * @sdp: the filesystem
  634. *
  635. */
  636. void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
  637. {
  638. gfs2_log_flush(sdp, NULL);
  639. for (;;) {
  640. gfs2_ail1_start(sdp, DIO_ALL);
  641. if (gfs2_ail1_empty(sdp, DIO_ALL))
  642. break;
  643. msleep(10);
  644. }
  645. }