/* meta_io.c */
  1. /*
  2. * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
  3. * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
  4. *
  5. * This copyrighted material is made available to anyone wishing to use,
  6. * modify, copy, or redistribute it subject to the terms and conditions
  7. * of the GNU General Public License version 2.
  8. */
  9. #include <linux/sched.h>
  10. #include <linux/slab.h>
  11. #include <linux/spinlock.h>
  12. #include <linux/completion.h>
  13. #include <linux/buffer_head.h>
  14. #include <linux/mm.h>
  15. #include <linux/pagemap.h>
  16. #include <linux/writeback.h>
  17. #include <linux/swap.h>
  18. #include <linux/delay.h>
  19. #include <linux/gfs2_ondisk.h>
  20. #include "gfs2.h"
  21. #include "lm_interface.h"
  22. #include "incore.h"
  23. #include "glock.h"
  24. #include "glops.h"
  25. #include "inode.h"
  26. #include "log.h"
  27. #include "lops.h"
  28. #include "meta_io.h"
  29. #include "rgrp.h"
  30. #include "trans.h"
  31. #include "util.h"
  32. #include "ops_address.h"
/* A buffer is "busy" while it is dirty, locked, or pinned in the journal;
 * such a buffer cannot yet be retired from the AIL or invalidated. */
#define buffer_busy(bh) \
((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned)))
/* A buffer is "in io" while it is dirty or locked (write-back may still be
 * in flight); unlike buffer_busy(), pinned-only buffers are excluded. */
#define buffer_in_io(bh) \
((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock)))
/*
 * aspace_get_block - stub get_block for the metadata address space
 *
 * Metadata buffers are mapped directly via getbuf()/map_bh(), so this
 * callback should never be reached; warn and refuse if it is.
 */
static int aspace_get_block(struct inode *inode, sector_t lblock,
			    struct buffer_head *bh_result, int create)
{
	gfs2_assert_warn(inode->i_sb->s_fs_info, 0);
	return -EOPNOTSUPP;
}
/*
 * gfs2_aspace_writepage - write back one page of the metadata address space
 *
 * Uses the stub aspace_get_block(); the buffers on these pages are
 * expected to already be mapped by getbuf().
 */
static int gfs2_aspace_writepage(struct page *page,
				 struct writeback_control *wbc)
{
	return block_write_full_page(page, aspace_get_block, wbc);
}
/* Minimal address space operations for glock metadata aspaces: pages are
 * written via the stub get_block above and released via gfs2_releasepage(). */
static const struct address_space_operations aspace_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};
/**
 * gfs2_aspace_get - Create and initialize a struct inode structure
 * @sdp: the filesystem the aspace is in
 *
 * Right now a struct inode is just a struct inode.  Maybe Linux
 * will supply a more lightweight address space construct (that works)
 * in the future.
 *
 * Make sure pages/buffers in this aspace aren't in high memory.
 *
 * Returns: the aspace, or NULL if new_inode() failed
 */
struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
{
	struct inode *aspace;

	aspace = new_inode(sdp->sd_vfs);
	if (aspace) {
		/* GFP_NOFS: page allocations for this mapping must not
		   recurse back into the filesystem. */
		mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
		aspace->i_mapping->a_ops = &aspace_aops;
		aspace->i_size = ~0ULL;
		aspace->u.generic_ip = NULL;
		insert_inode_hash(aspace);
	}
	return aspace;
}
/**
 * gfs2_aspace_put - Tear down an aspace inode created by gfs2_aspace_get()
 * @aspace: the aspace inode
 */
void gfs2_aspace_put(struct inode *aspace)
{
	remove_inode_hash(aspace);
	iput(aspace);
}
/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @ai: the AIL transaction whose ail1 list should be written back
 *
 * Caller must hold sd_log_lock (asserted below).  The lock is dropped
 * around blocking operations (error reporting and buffer write-out),
 * which invalidates the list iteration, so the scan restarts from the
 * top after every such drop.
 */
void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int retry;

	BUG_ON(!spin_is_locked(&sdp->sd_log_lock));

	do {
		retry = 0;

		list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
						 bd_ail_st_list) {
			bh = bd->bd_bh;

			gfs2_assert(sdp, bd->bd_ail == ai);

			if (!buffer_busy(bh)) {
				/* Write-back is finished: report any I/O
				   error, then retire the buffer to ail2. */
				if (!buffer_uptodate(bh)) {
					gfs2_log_unlock(sdp);
					gfs2_io_error_bh(sdp, bh);
					gfs2_log_lock(sdp);
				}
				list_move(&bd->bd_ail_st_list,
					  &ai->ai_ail2_list);
				continue;
			}

			if (!buffer_dirty(bh))
				continue;

			/* Move to the head of ail1 so a restarted scan
			   does not immediately revisit this buffer. */
			list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);

			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);

			/* The list may have changed while unlocked;
			   restart the scan. */
			retry = 1;
			break;
		}
	} while (retry);
}
/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @ai: the AIL entry
 * @flags: DIO_ALL to keep scanning past busy buffers
 *
 * Moves every non-busy buffer from the ail1 list to the ail2 list.
 * Without DIO_ALL the scan stops at the first busy buffer.
 *
 * Returns: 1 if the ail1 list is now empty, 0 otherwise
 */
int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int flags)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_ail == ai);

		if (buffer_busy(bh)) {
			if (flags & DIO_ALL)
				continue;
			else
				break;
		}

		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);

		list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
	}

	return list_empty(&ai->ai_ail1_list);
}
/**
 * gfs2_ail2_empty_one - Remove all buffers on a transaction's ail2 list
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 * Detaches every bufdata from the AIL entry and from its glock's AIL
 * list, and drops the buffer reference held while it was on the AIL.
 */
void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &ai->ai_ail2_list;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_ail == ai);
		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&bd->bd_gl->gl_ail_count);
		brelse(bd->bd_bh);
	}
}
/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 * A revoke is added to the log for each block removed.
 */
void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int blocks;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	uint64_t blkno;
	int error;

	blocks = atomic_read(&gl->gl_ail_count);
	if (!blocks)
		return;

	/* Reserve log space: one revoke per AIL block. */
	error = gfs2_trans_begin(sdp, 0, blocks);
	if (gfs2_assert_withdraw(sdp, !error))
		return;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		blkno = bh->b_blocknr;
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));

		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&gl->gl_ail_count);
		brelse(bh);
		/* gfs2_trans_add_revoke() may block; drop sd_log_lock
		   around the call and re-take it for the next pass. */
		gfs2_log_unlock(sdp);

		gfs2_trans_add_revoke(sdp, blkno);

		gfs2_log_lock(sdp);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	gfs2_log_unlock(sdp);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
/**
 * gfs2_meta_inval - Invalidate all buffers associated with a glock
 * @gl: the glock
 *
 * Drops every page of the glock's metadata address space.  The AIL
 * count must already be zero (asserted), and afterwards the mapping
 * must hold no pages (asserted).
 */
void gfs2_meta_inval(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;
	struct address_space *mapping = gl->gl_aspace->i_mapping;

	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));

	/* NOTE(review): i_writecount is bumped around the truncate,
	   presumably to hold off concurrent users — confirm intent. */
	atomic_inc(&aspace->i_writecount);
	truncate_inode_pages(mapping, 0);
	atomic_dec(&aspace->i_writecount);

	gfs2_assert_withdraw(sdp, !mapping->nrpages);
}
  227. /**
  228. * gfs2_meta_sync - Sync all buffers associated with a glock
  229. * @gl: The glock
  230. * @flags: DIO_START | DIO_WAIT
  231. *
  232. */
  233. void gfs2_meta_sync(struct gfs2_glock *gl, int flags)
  234. {
  235. struct address_space *mapping = gl->gl_aspace->i_mapping;
  236. int error = 0;
  237. if (flags & DIO_START)
  238. filemap_fdatawrite(mapping);
  239. if (!error && (flags & DIO_WAIT))
  240. error = filemap_fdatawait(mapping);
  241. if (error)
  242. gfs2_io_error(gl->gl_sbd);
  243. }
/**
 * getbuf - Get a buffer with a given address space
 * @sdp: the filesystem
 * @aspace: the address space
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer (with an extra reference held), or NULL when
 *          @create is 0 and the page is not already cached
 */
static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace,
				  uint64_t blkno, int create)
{
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;             /* convert block to page */
	bufnum = blkno - (index << shift);  /* block buf index within page */

	if (create) {
		/* grab_cache_page() can fail under memory pressure;
		   yield and retry until it succeeds. */
		for (;;) {
			page = grab_cache_page(aspace->i_mapping, index);
			if (page)
				break;
			yield();
		}
	} else {
		page = find_lock_page(aspace->i_mapping, index);
		if (!page)
			return NULL;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

	unlock_page(page);
	mark_page_accessed(page);
	page_cache_release(page);

	return bh;
}
/*
 * meta_prep_new - prepare a buffer as a brand new metadata block
 *
 * Marks the buffer uptodate without reading it from disk (the caller
 * is about to construct fresh contents) and stamps the GFS2 magic.
 */
static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}
/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer, prepared as a new metadata block (uptodate,
 *          not dirty, magic number set)
 */
struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, uint64_t blkno)
{
	struct buffer_head *bh;
	bh = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
	meta_prep_new(bh);
	return bh;
}
/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags passed to gfs2_meta_reread()
 * @bhp: the place where the buffer is returned
 *
 * On error the reference taken by getbuf() is dropped and *bhp must
 * not be used by the caller.
 *
 * Returns: errno
 */
int gfs2_meta_read(struct gfs2_glock *gl, uint64_t blkno, int flags,
		   struct buffer_head **bhp)
{
	int error;

	*bhp = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
	error = gfs2_meta_reread(gl->gl_sbd, *bhp, flags);
	if (error)
		brelse(*bhp);

	return error;
}
/**
 * gfs2_meta_reread - Reread a block from disk
 * @sdp: the filesystem
 * @bh: The block to read
 * @flags: Flags that control the read (DIO_FORCE, DIO_START, DIO_WAIT)
 *
 * Returns: errno
 */
int gfs2_meta_reread(struct gfs2_sbd *sdp, struct buffer_head *bh, int flags)
{
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	/* DIO_FORCE discards any cached contents, forcing a disk read. */
	if (flags & DIO_FORCE)
		clear_buffer_uptodate(bh);

	if ((flags & DIO_START) && !buffer_uptodate(bh))
		ll_rw_block(READ, 1, &bh);

	if (flags & DIO_WAIT) {
		wait_on_buffer(bh);

		if (!buffer_uptodate(bh)) {
			struct gfs2_trans *tr = current->journal_info;
			/* Only raise a filesystem I/O error if we are in
			   a transaction that has touched something. */
			if (tr && tr->tr_touched)
				gfs2_io_error_bh(sdp, bh);
			return -EIO;
		}
		/* Re-check: a shutdown may have occurred while sleeping. */
		if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
			return -EIO;
	}

	return 0;
}
  360. /**
  361. * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
  362. * @gl: the glock the buffer belongs to
  363. * @bh: The buffer to be attached to
  364. * @meta: Flag to indicate whether its metadata or not
  365. */
  366. void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
  367. int meta)
  368. {
  369. struct gfs2_bufdata *bd;
  370. if (meta)
  371. lock_page(bh->b_page);
  372. if (bh->b_private) {
  373. if (meta)
  374. unlock_page(bh->b_page);
  375. return;
  376. }
  377. bd = kmem_cache_alloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL),
  378. memset(bd, 0, sizeof(struct gfs2_bufdata));
  379. bd->bd_bh = bh;
  380. bd->bd_gl = gl;
  381. INIT_LIST_HEAD(&bd->bd_list_tr);
  382. if (meta) {
  383. lops_init_le(&bd->bd_le, &gfs2_buf_lops);
  384. } else {
  385. lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
  386. }
  387. bh->b_private = bd;
  388. if (meta)
  389. unlock_page(bh->b_page);
  390. }
/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to be pinned
 *
 * Takes an extra reference on the buffer (get_bh) so it cannot go
 * away while it is held in the journal.
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd = bh->b_private;

	gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));

	/* Double-pinning indicates a logic error elsewhere. */
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);

	wait_on_buffer(bh);

	/* If this buffer is in the AIL and it has already been written
	   to in-place disk block, remove it from the AIL. */
	gfs2_log_lock(sdp);
	if (bd->bd_ail && !buffer_in_io(bh))
		list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
	gfs2_log_unlock(sdp);

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);

	get_bh(bh);
}
/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: the AIL entry the buffer moves onto
 *
 * Redirties the buffer and places it on @ai's ail1 list so write-back
 * to the in-place location can proceed.
 */
void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd = bh->b_private;

	gfs2_assert_withdraw(sdp, buffer_uptodate(bh));

	if (!buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);

	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	gfs2_log_lock(sdp);
	if (bd->bd_ail) {
		/* Already on an AIL: drop the old AIL's buffer reference;
		   the glock's AIL membership carries over. */
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		/* First time on the AIL: record it against the glock. */
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_ail = ai;
	list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
	gfs2_log_unlock(sdp);
}
/**
 * gfs2_meta_wipe - make inode's buffers so they aren't dirty/pinned anymore
 * @ip: the inode who owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 * For each cached buffer in the run: unpin it (removing it from the
 * current transaction's buffer count), detach it from the AIL (adding
 * a revoke), then clear its dirty and uptodate state.
 */
void gfs2_meta_wipe(struct gfs2_inode *ip, uint64_t bstart, uint32_t blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct inode *aspace = ip->i_gl->gl_aspace;
	struct buffer_head *bh;

	while (blen) {
		bh = getbuf(sdp, aspace, bstart, NO_CREATE);
		if (bh) {
			struct gfs2_bufdata *bd = bh->b_private;

			if (test_clear_buffer_pinned(bh)) {
				/* NOTE(review): assumes a transaction is
				   always active when a pinned buffer is
				   found here — tr is dereferenced below
				   without a NULL check; confirm. */
				struct gfs2_trans *tr = current->journal_info;
				gfs2_log_lock(sdp);
				list_del_init(&bd->bd_le.le_list);
				gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
				sdp->sd_log_num_buf--;
				gfs2_log_unlock(sdp);
				tr->tr_num_buf_rm++;
				brelse(bh);	/* drop the pin reference */
			}
			if (bd) {
				gfs2_log_lock(sdp);
				if (bd->bd_ail) {
					uint64_t blkno = bh->b_blocknr;
					bd->bd_ail = NULL;
					list_del(&bd->bd_ail_st_list);
					list_del(&bd->bd_ail_gl_list);
					atomic_dec(&bd->bd_gl->gl_ail_count);
					brelse(bh);	/* AIL reference */
					gfs2_log_unlock(sdp);
					gfs2_trans_add_revoke(sdp, blkno);
				} else
					gfs2_log_unlock(sdp);
			}

			lock_buffer(bh);
			clear_buffer_dirty(bh);
			clear_buffer_uptodate(bh);
			unlock_buffer(bh);

			brelse(bh);	/* drop getbuf()'s reference */
		}

		bstart++;
		blen--;
	}
}
  495. /**
  496. * gfs2_meta_cache_flush - get rid of any references on buffers for this inode
  497. * @ip: The GFS2 inode
  498. *
  499. * This releases buffers that are in the most-recently-used array of
  500. * blocks used for indirect block addressing for this inode.
  501. */
  502. void gfs2_meta_cache_flush(struct gfs2_inode *ip)
  503. {
  504. struct buffer_head **bh_slot;
  505. unsigned int x;
  506. spin_lock(&ip->i_spin);
  507. for (x = 0; x < GFS2_MAX_META_HEIGHT; x++) {
  508. bh_slot = &ip->i_cache[x];
  509. if (!*bh_slot)
  510. break;
  511. brelse(*bh_slot);
  512. *bh_slot = NULL;
  513. }
  514. spin_unlock(&ip->i_spin);
  515. }
/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @new: Non-zero if we may create a new buffer
 * @bhp: the buffer is returned here
 *
 * Try to use the gfs2_inode's MRU metadata tree cache.
 *
 * Returns: errno
 */
int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, uint64_t num,
			      int new, struct buffer_head **bhp)
{
	struct buffer_head *bh, **bh_slot = ip->i_cache + height;
	int error;

	/* Fast path: the cached buffer at this height may already be
	   the block we want. */
	spin_lock(&ip->i_spin);
	bh = *bh_slot;
	if (bh) {
		if (bh->b_blocknr == num)
			get_bh(bh);
		else
			bh = NULL;
	}
	spin_unlock(&ip->i_spin);

	if (bh) {
		if (new)
			meta_prep_new(bh);
		else {
			error = gfs2_meta_reread(GFS2_SB(&ip->i_inode), bh,
						 DIO_START | DIO_WAIT);
			if (error) {
				brelse(bh);
				return error;
			}
		}
	} else {
		if (new)
			bh = gfs2_meta_new(ip->i_gl, num);
		else {
			error = gfs2_meta_read(ip->i_gl, num,
					       DIO_START | DIO_WAIT, &bh);
			if (error)
				return error;
		}

		/* Install the buffer into the cache slot, releasing any
		   previous occupant. */
		spin_lock(&ip->i_spin);
		if (*bh_slot != bh) {
			brelse(*bh_slot);
			*bh_slot = bh;
			get_bh(bh);
		}
		spin_unlock(&ip->i_spin);
	}

	if (new) {
		/* Height 0 is the dinode itself; it must not be created
		   through this path. */
		if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), height)) {
			brelse(bh);
			return -EIO;
		}
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
	} else if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh,
			(height) ? GFS2_METATYPE_IN : GFS2_METATYPE_DI)) {
		brelse(bh);
		return -EIO;
	}

	*bhp = bh;

	return 0;
}
/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * Submits asynchronous reads (DIO_START, no wait) for up to
 * gt_max_readahead bytes worth of blocks, stopping early once the
 * first block of the extent has become uptodate.
 */
void gfs2_meta_ra(struct gfs2_glock *gl, uint64_t dblock, uint32_t extlen)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;
	struct buffer_head *first_bh, *bh;
	uint32_t max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
			  sdp->sd_sb.sb_bsize_shift;
	int error;

	if (!extlen || !max_ra)
		return;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = getbuf(sdp, aspace, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	if (!buffer_locked(first_bh)) {
		error = gfs2_meta_reread(sdp, first_bh, DIO_START);
		if (error)
			goto out;
	}

	dblock++;
	extlen--;

	while (extlen) {
		bh = getbuf(sdp, aspace, dblock, CREATE);

		if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
			error = gfs2_meta_reread(sdp, bh, DIO_START);
			brelse(bh);
			if (error)
				goto out;
		} else
			brelse(bh);

		dblock++;
		extlen--;

		/* The first block is what the caller actually wants;
		   once it is ready, stop issuing more readahead. */
		if (buffer_uptodate(first_bh))
			break;
	}

out:
	brelse(first_bh);
}
/**
 * gfs2_meta_syncfs - sync all the buffers in a filesystem
 * @sdp: the filesystem
 *
 * Flushes the log, then repeatedly starts AIL write-back until the
 * ail1 lists drain, sleeping briefly between passes.
 */
void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
	gfs2_log_flush(sdp, NULL);
	for (;;) {
		gfs2_ail1_start(sdp, DIO_ALL);
		if (gfs2_ail1_empty(sdp, DIO_ALL))
			break;
		msleep(10);
	}
}