lops.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        BUG_ON(!current->journal_info);

        clear_buffer_dirty(bh);
        if (test_set_buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);
        if (!buffer_uptodate(bh))
                gfs2_io_error_bh(sdp, bh);
        bd = bh->b_private;
        /* If this buffer is in the AIL and it has already been written
         * to the in-place disk block, remove it from the AIL. */
        spin_lock(&sdp->sd_ail_lock);
        if (bd->bd_ail)
                list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
        spin_unlock(&sdp->sd_ail_lock);
        get_bh(bh);
        atomic_inc(&sdp->sd_log_pinned);
        trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
        return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

static void maybe_release_space(struct gfs2_bufdata *bd)
{
        struct gfs2_glock *gl = bd->bd_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_rgrpd *rgd = gl->gl_object;
        unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
        struct gfs2_bitmap *bi = rgd->rd_bits + index;

        if (bi->bi_clone == 0)
                return;
        if (sdp->sd_args.ar_discard)
                gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
        memcpy(bi->bi_clone + bi->bi_offset,
               bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
        clear_bit(GBF_FULL, &bi->bi_flags);
        rgd->rd_free_clone = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: The AIL structure the unpinned buffer is added to
 *
 */
static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
                       struct gfs2_ail *ai)
{
        struct gfs2_bufdata *bd = bh->b_private;

        BUG_ON(!buffer_uptodate(bh));
        BUG_ON(!buffer_pinned(bh));

        lock_buffer(bh);
        mark_buffer_dirty(bh);
        clear_buffer_pinned(bh);

        if (buffer_is_rgrp(bd))
                maybe_release_space(bd);

        spin_lock(&sdp->sd_ail_lock);
        if (bd->bd_ail) {
                list_del(&bd->bd_ail_st_list);
                brelse(bh);
        } else {
                struct gfs2_glock *gl = bd->bd_gl;
                list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
                atomic_inc(&gl->gl_ail_count);
        }
        bd->bd_ail = ai;
        list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);

        clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        trace_gfs2_pin(bd, 0);
        unlock_buffer(bh);
        atomic_dec(&sdp->sd_log_pinned);
}

static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
        BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
               (sdp->sd_log_flush_head != sdp->sd_log_head));

        if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
                sdp->sd_log_flush_head = 0;
                sdp->sd_log_flush_wrapped = 1;
        }
}

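/*
 * Illustrative example (values are made up): with jd_blocks == 32768,
 * advancing the flush head from block 32767 wraps it back to 0 and sets
 * sd_log_flush_wrapped, recording that the flush head has passed the end
 * of the journal during this flush.
 */
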
static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
        unsigned int lbn = sdp->sd_log_flush_head;
        struct gfs2_journal_extent *je;
        u64 block;

        list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
                if (lbn >= je->lblock && lbn < je->lblock + je->blocks) {
                        block = je->dblock + lbn - je->lblock;
                        gfs2_log_incr_head(sdp);
                        return block;
                }
        }

        return -1;
}

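/*
 * Worked example (illustrative values, not from any real journal): given a
 * cached extent with je->lblock = 8, je->blocks = 16 and je->dblock = 1000,
 * a flush head of lbn = 11 falls inside the extent (8 <= 11 < 24), so the
 * physical block is 1000 + 11 - 8 = 1003. A head outside every extent
 * returns -1 (all ones in a u64).
 */
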
/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */
static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
                                  int error)
{
        struct buffer_head *bh, *next;
        struct page *page = bvec->bv_page;
        unsigned size;

        bh = page_buffers(page);
        size = bvec->bv_len;
        while (bh_offset(bh) < bvec->bv_offset)
                bh = bh->b_this_page;
        do {
                if (error)
                        set_buffer_write_io_error(bh);
                unlock_buffer(bh);
                next = bh->b_this_page;
                size -= bh->b_size;
                brelse(bh);
                bh = next;
        } while (bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 * @error: Status of i/o request
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */
static void gfs2_end_log_write(struct bio *bio, int error)
{
        struct gfs2_sbd *sdp = bio->bi_private;
        struct bio_vec *bvec;
        struct page *page;
        int i;

        if (error) {
                sdp->sd_log_error = error;
                fs_err(sdp, "Error %d writing to log\n", error);
        }

        bio_for_each_segment(bvec, bio, i) {
                page = bvec->bv_page;
                if (page_has_buffers(page))
                        gfs2_end_log_write_bh(sdp, bvec, error);
                else
                        mempool_free(page, gfs2_page_pool);
        }

        bio_put(bio);
        if (atomic_dec_and_test(&sdp->sd_log_in_flight))
                wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_flush_bio - Submit any pending log bio
 * @sdp: The superblock
 * @rw: The rw flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */
void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw)
{
        if (sdp->sd_log_bio) {
                atomic_inc(&sdp->sd_log_in_flight);
                submit_bio(rw, sdp->sd_log_bio);
                sdp->sd_log_bio = NULL;
        }
}

/**
 * gfs2_log_alloc_bio - Allocate a new bio for log writing
 * @sdp: The superblock
 * @blkno: The next device block number we want to write to
 *
 * This should never be called when there is a cached bio in the
 * super block. When it returns, there will be a cached bio in the
 * super block which will have as many bio_vecs as the device is
 * happy to handle.
 *
 * Returns: Newly allocated bio
 */
static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
{
        struct super_block *sb = sdp->sd_vfs;
        unsigned nrvecs = bio_get_nr_vecs(sb->s_bdev);
        struct bio *bio;

        BUG_ON(sdp->sd_log_bio);

        while (1) {
                bio = bio_alloc(GFP_NOIO, nrvecs);
                if (likely(bio))
                        break;
                nrvecs = max(nrvecs/2, 1U);
        }

        bio->bi_sector = blkno * (sb->s_blocksize >> 9);
        bio->bi_bdev = sb->s_bdev;
        bio->bi_end_io = gfs2_end_log_write;
        bio->bi_private = sdp;

        sdp->sd_log_bio = bio;

        return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The superblock
 * @blkno: The device block number we want to write to
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is not a cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */
static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
{
        struct bio *bio = sdp->sd_log_bio;
        u64 nblk;

        if (bio) {
                nblk = bio->bi_sector + bio_sectors(bio);
                nblk >>= sdp->sd_fsb2bb_shift;
                if (blkno == nblk)
                        return bio;
                gfs2_log_flush_bio(sdp, WRITE);
        }

        return gfs2_log_alloc_bio(sdp, blkno);
}

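/*
 * Illustrative arithmetic, assuming 4KB filesystem blocks on a 512-byte
 * sector device (so sd_fsb2bb_shift == 3): a cached bio starting at sector
 * 8024 that already carries two 4KB pages spans 16 sectors, so
 * bi_sector + bio_sectors(bio) == 8040, and 8040 >> 3 == 1005. A request
 * for fs block 1005 is therefore sequential and reuses the bio; any other
 * block number forces a flush and a fresh allocation.
 */
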
/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */
static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
                           unsigned size, unsigned offset)
{
        u64 blkno = gfs2_log_bmap(sdp);
        struct bio *bio;
        int ret;

        bio = gfs2_log_get_bio(sdp, blkno);
        ret = bio_add_page(bio, page, size, offset);
        if (ret == 0) {
                gfs2_log_flush_bio(sdp, WRITE);
                bio = gfs2_log_alloc_bio(sdp, blkno);
                ret = bio_add_page(bio, page, size, offset);
                WARN_ON(ret == 0);
        }
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */
static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh));
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */
void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
        struct super_block *sb = sdp->sd_vfs;
        gfs2_log_write(sdp, page, sb->s_blocksize, 0);
}

static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
                                      u32 ld_length, u32 ld_data1)
{
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        struct gfs2_log_descriptor *ld = page_address(page);
        clear_page(ld);
        ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
        ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
        ld->ld_type = cpu_to_be32(ld_type);
        ld->ld_length = cpu_to_be32(ld_length);
        ld->ld_data1 = cpu_to_be32(ld_data1);
        ld->ld_data2 = 0;
        return page;
}

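/*
 * Layout sketch of the block this helper builds, as used in this file
 * (field names from linux/gfs2_ondisk.h): a gfs2_log_descriptor starts
 * with a standard meta header (magic/type/format = LD); ld_type says what
 * the payload describes (METADATA, JDATA or REVOKE); ld_length is the
 * number of log blocks covered, including the descriptor itself; and
 * ld_data1 is a type-specific count (number of buffers, or of revokes).
 */
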
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
        struct gfs2_meta_header *mh;
        struct gfs2_trans *tr;

        lock_buffer(bd->bd_bh);
        gfs2_log_lock(sdp);
        tr = current->journal_info;
        tr->tr_touched = 1;
        if (!list_empty(&bd->bd_list))
                goto out;
        set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
        mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
        if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) {
                printk(KERN_ERR
                       "Attempting to add uninitialised block to journal (inplace block=%lld)\n",
                       (unsigned long long)bd->bd_bh->b_blocknr);
                BUG();
        }
        gfs2_pin(sdp, bd->bd_bh);
        mh->__pad0 = cpu_to_be64(0);
        mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
        sdp->sd_log_num_buf++;
        list_add(&bd->bd_list, &sdp->sd_log_le_buf);
        tr->tr_num_buf_new++;
out:
        gfs2_log_unlock(sdp);
        unlock_buffer(bd->bd_bh);
}

static void gfs2_check_magic(struct buffer_head *bh)
{
        void *kaddr;
        __be32 *ptr;

        clear_buffer_escaped(bh);
        kaddr = kmap_atomic(bh->b_page);
        ptr = kaddr + bh_offset(bh);
        if (*ptr == cpu_to_be32(GFS2_MAGIC))
                set_buffer_escaped(bh);
        kunmap_atomic(kaddr);
}

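/*
 * Why escaping matters (background, not new behaviour): journal replay
 * identifies metadata by the GFS2_MAGIC value in the first four bytes of a
 * block. A journaled *data* block that happens to begin with that same
 * value would be misread, so such blocks are flagged "escaped" here, have
 * those four bytes zeroed in the copy written to the log (see
 * gfs2_before_commit() below), and get the magic restored from the escape
 * tag during replay in databuf_lo_scan_elements().
 */
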
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
                               unsigned int total, struct list_head *blist,
                               bool is_databuf)
{
        struct gfs2_log_descriptor *ld;
        struct gfs2_bufdata *bd1 = NULL, *bd2;
        struct page *page;
        unsigned int num;
        unsigned n;
        __be64 *ptr;

        gfs2_log_lock(sdp);
        bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
        while (total) {
                num = total;
                if (total > limit)
                        num = limit;
                gfs2_log_unlock(sdp);
                /* The descriptor type must match what the scan side expects:
                   jdata entries carry an extra escape tag each. */
                page = gfs2_get_log_desc(sdp,
                                         is_databuf ? GFS2_LOG_DESC_JDATA :
                                         GFS2_LOG_DESC_METADATA, num + 1, num);
                ld = page_address(page);
                gfs2_log_lock(sdp);
                ptr = (__be64 *)(ld + 1);

                n = 0;
                list_for_each_entry_continue(bd1, blist, bd_list) {
                        *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
                        if (is_databuf) {
                                gfs2_check_magic(bd1->bd_bh);
                                *ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
                        }
                        if (++n >= num)
                                break;
                }

                gfs2_log_unlock(sdp);
                gfs2_log_write_page(sdp, page);
                gfs2_log_lock(sdp);

                n = 0;
                list_for_each_entry_continue(bd2, blist, bd_list) {
                        get_bh(bd2->bd_bh);
                        gfs2_log_unlock(sdp);
                        lock_buffer(bd2->bd_bh);

                        if (buffer_escaped(bd2->bd_bh)) {
                                void *kaddr;
                                page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
                                ptr = page_address(page);
                                kaddr = kmap_atomic(bd2->bd_bh->b_page);
                                memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
                                       bd2->bd_bh->b_size);
                                kunmap_atomic(kaddr);
                                *(__be32 *)ptr = 0;
                                clear_buffer_escaped(bd2->bd_bh);
                                unlock_buffer(bd2->bd_bh);
                                brelse(bd2->bd_bh);
                                gfs2_log_write_page(sdp, page);
                        } else {
                                gfs2_log_write_bh(sdp, bd2->bd_bh);
                        }
                        gfs2_log_lock(sdp);
                        if (++n >= num)
                                break;
                }

                BUG_ON(total < num);
                total -= num;
        }
        gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
        unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */

        gfs2_before_commit(sdp, limit, sdp->sd_log_num_buf,
                           &sdp->sd_log_le_buf, 0);
}

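/*
 * Where the "503" above comes from (a derivation, assuming the usual
 * on-disk sizes: a 24-byte gfs2_meta_header inside a 72-byte
 * gfs2_log_descriptor): each descriptor block holds the descriptor plus
 * an array of __be64 block numbers, so with 4096-byte blocks that is
 * (4096 - 72) / 8 = 503 entries per descriptor.
 */
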
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_buf;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                sdp->sd_log_num_buf--;

                gfs2_unpin(sdp, bd->bd_bh, ai);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
                               struct gfs2_log_header_host *head, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (pass != 0)
                return;

        sdp->sd_found_blocks = 0;
        sdp->sd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                struct gfs2_log_descriptor *ld, __be64 *ptr,
                                int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
                return 0;

        gfs2_replay_incr_blk(sdp, &start);

        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);

                sdp->sd_found_blocks++;

                if (gfs2_revoke_check(sdp, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                if (gfs2_meta_check(sdp, bh_ip))
                        error = -EIO;
                else
                        mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);

                if (error)
                        break;

                sdp->sd_replayed_blocks++;
        }

        return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
                jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
        struct gfs2_glock *gl = bd->bd_gl;
        struct gfs2_trans *tr;

        tr = current->journal_info;
        tr->tr_touched = 1;
        tr->tr_num_revoke++;
        sdp->sd_log_num_revoke++;
        atomic_inc(&gl->gl_revokes);
        set_bit(GLF_LFLUSH, &gl->gl_flags);
        list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
        struct gfs2_log_descriptor *ld;
        struct gfs2_meta_header *mh;
        unsigned int offset;
        struct list_head *head = &sdp->sd_log_le_revoke;
        struct gfs2_bufdata *bd;
        struct page *page;
        unsigned int length;

        if (!sdp->sd_log_num_revoke)
                return;

        length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
        page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
        ld = page_address(page);
        offset = sizeof(struct gfs2_log_descriptor);

        list_for_each_entry(bd, head, bd_list) {
                sdp->sd_log_num_revoke--;

                if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
                        gfs2_log_write_page(sdp, page);
                        page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
                        mh = page_address(page);
                        clear_page(mh);
                        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
                        mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
                        mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
                        offset = sizeof(struct gfs2_meta_header);
                }

                *(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
                offset += sizeof(u64);
        }
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

        gfs2_log_write_page(sdp, page);
}

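/*
 * Packing arithmetic (a sketch, assuming 4096-byte blocks, a 72-byte
 * gfs2_log_descriptor and a 24-byte gfs2_meta_header): the first block
 * holds (4096 - 72) / 8 = 503 revoke entries, and each GFS2_METATYPE_LB
 * continuation block holds (4096 - 24) / 8 = 509. This is the layout
 * gfs2_struct2blk() sizes "length" for above.
 */
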
static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_revoke;
        struct gfs2_bufdata *bd;
        struct gfs2_glock *gl;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                gl = bd->bd_gl;
                atomic_dec(&gl->gl_revokes);
                clear_bit(GLF_LFLUSH, &gl->gl_flags);
                kmem_cache_free(gfs2_bufdata_cachep, bd);
        }
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
                                  struct gfs2_log_header_host *head, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (pass != 0)
                return;

        sdp->sd_found_revokes = 0;
        sdp->sd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                   struct gfs2_log_descriptor *ld, __be64 *ptr,
                                   int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        unsigned int blks = be32_to_cpu(ld->ld_length);
        unsigned int revokes = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh;
        unsigned int offset;
        u64 blkno;
        int first = 1;
        int error;

        if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
                return 0;

        offset = sizeof(struct gfs2_log_descriptor);

        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                error = gfs2_replay_read_block(jd, start, &bh);
                if (error)
                        return error;

                if (!first)
                        gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

                while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
                        blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

                        error = gfs2_revoke_add(sdp, blkno, start);
                        if (error < 0) {
                                brelse(bh);
                                return error;
                        } else if (error)
                                sdp->sd_found_revokes++;

                        if (!--revokes)
                                break;

                        offset += sizeof(u64);
                }

                brelse(bh);
                offset = sizeof(struct gfs2_meta_header);
                first = 0;
        }

        return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_revoke_clean(sdp);
                return;
        }
        if (pass != 1)
                return;

        fs_info(sdp, "jid=%u: Found %u revoke tags\n",
                jd->jd_jid, sdp->sd_found_revokes);

        gfs2_revoke_clean(sdp);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per metadata)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
        struct gfs2_trans *tr = current->journal_info;
        struct address_space *mapping = bd->bd_bh->b_page->mapping;
        struct gfs2_inode *ip = GFS2_I(mapping->host);

        lock_buffer(bd->bd_bh);
        gfs2_log_lock(sdp);
        if (tr)
                tr->tr_touched = 1;
        if (!list_empty(&bd->bd_list))
                goto out;
        set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
        if (gfs2_is_jdata(ip)) {
                gfs2_pin(sdp, bd->bd_bh);
                tr->tr_num_databuf_new++;
                sdp->sd_log_num_databuf++;
                list_add_tail(&bd->bd_list, &sdp->sd_log_le_databuf);
        } else {
                list_add_tail(&bd->bd_list, &sdp->sd_log_le_ordered);
        }
out:
        gfs2_log_unlock(sdp);
        unlock_buffer(bd->bd_bh);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
        unsigned int limit = buf_limit(sdp) / 2;

        gfs2_before_commit(sdp, limit, sdp->sd_log_num_databuf,
                           &sdp->sd_log_le_databuf, 1);
}

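/*
 * Halving buf_limit() matches the tag format described above
 * databuf_lo_add(): every journaled data block consumes two __be64 slots
 * in the descriptor (block number plus escape flag), so with 4k blocks the
 * per-descriptor limit drops from 503 to 503 / 2 = 251, the "251 or so"
 * mentioned in that comment.
 */
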
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                    struct gfs2_log_descriptor *ld,
                                    __be64 *ptr, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        u64 esc;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
                return 0;

        gfs2_replay_incr_blk(sdp, &start);
        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);
                esc = be64_to_cpu(*ptr++);

                sdp->sd_found_blocks++;

                if (gfs2_revoke_check(sdp, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                /* Unescape */
                if (esc) {
                        __be32 *eptr = (__be32 *)bh_ip->b_data;
                        *eptr = cpu_to_be32(GFS2_MAGIC);
                }
                mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);

                sdp->sd_replayed_blocks++;
        }

        return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        /* data sync? */
        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
                jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_databuf;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
                list_del_init(&bd->bd_list);
                sdp->sd_log_num_databuf--;
                gfs2_unpin(sdp, bd->bd_bh, ai);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
}

const struct gfs2_log_operations gfs2_buf_lops = {
        .lo_add = buf_lo_add,
        .lo_before_commit = buf_lo_before_commit,
        .lo_after_commit = buf_lo_after_commit,
        .lo_before_scan = buf_lo_before_scan,
        .lo_scan_elements = buf_lo_scan_elements,
        .lo_after_scan = buf_lo_after_scan,
        .lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
        .lo_add = revoke_lo_add,
        .lo_before_commit = revoke_lo_before_commit,
        .lo_after_commit = revoke_lo_after_commit,
        .lo_before_scan = revoke_lo_before_scan,
        .lo_scan_elements = revoke_lo_scan_elements,
        .lo_after_scan = revoke_lo_after_scan,
        .lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
        .lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
        .lo_add = databuf_lo_add,
        .lo_before_commit = databuf_lo_before_commit,
        .lo_after_commit = databuf_lo_after_commit,
        .lo_scan_elements = databuf_lo_scan_elements,
        .lo_after_scan = databuf_lo_after_scan,
        .lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
        &gfs2_databuf_lops,
        &gfs2_buf_lops,
        &gfs2_rg_lops,
        &gfs2_revoke_lops,
        NULL,
};
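
/*
 * Usage sketch (the real dispatch helpers live in lops.h, not here): the
 * log code walks the NULL-terminated gfs2_log_ops[] array and calls each
 * hook that is non-NULL, roughly:
 *
 *      const struct gfs2_log_operations **ops;
 *      for (ops = gfs2_log_ops; *ops; ops++)
 *              if ((*ops)->lo_before_commit)
 *                      (*ops)->lo_before_commit(sdp);
 *
 * so an all-defaults entry such as gfs2_rg_lops keeps its slot in the
 * ordering but is a no-op for every phase.
 */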