lops.c
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"
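
/*
 * Overview: while a buffer belongs to the current transaction it is
 * "pinned" -- gfs2_pin() clears the dirty bit so ordinary writeback
 * leaves it alone and takes an extra reference.  After the corresponding
 * log blocks have been committed, gfs2_unpin() redirties the buffer and
 * places it on the transaction's AIL (active items list, struct
 * gfs2_ail), from where it is later written back to its in-place
 * location on disk.
 */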

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_ail)
		list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_rgrpd *rgd = gl->gl_object;
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == 0)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: The AIL to attach the unpinned buffer to
 *
 */
static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_ail) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_ail = ai;
	list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}

static inline struct gfs2_log_descriptor *bh_log_desc(struct buffer_head *bh)
{
	return (struct gfs2_log_descriptor *)bh->b_data;
}

static inline __be64 *bh_log_ptr(struct buffer_head *bh)
{
	struct gfs2_log_descriptor *ld = bh_log_desc(bh);
	return (__force __be64 *)(ld + 1);
}

static inline __be64 *bh_ptr_end(struct buffer_head *bh)
{
	return (__force __be64 *)(bh->b_data + bh->b_size);
}

/**
 * gfs2_log_write_endio - End of I/O for a log buffer
 * @bh: The buffer head
 * @uptodate: I/O Status
 *
 */
static void gfs2_log_write_endio(struct buffer_head *bh, int uptodate)
{
	struct gfs2_sbd *sdp = bh->b_private;

	bh->b_private = NULL;

	end_buffer_write_sync(bh, uptodate);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
 * @sdp: The GFS2 superblock
 *
 * Returns: the buffer_head
 */
static struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
	u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;

	bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
	gfs2_log_incr_head(sdp);
	atomic_inc(&sdp->sd_log_in_flight);
	bh->b_private = sdp;
	bh->b_end_io = gfs2_log_write_endio;

	return bh;
}

/**
 * gfs2_fake_write_endio - End of I/O for a fake log buffer head
 * @bh: The buffer head
 * @uptodate: The I/O Status
 *
 */
static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate)
{
	struct buffer_head *real_bh = bh->b_private;
	struct gfs2_bufdata *bd = real_bh->b_private;
	struct gfs2_sbd *sdp = bd->bd_gl->gl_sbd;

	end_buffer_write_sync(bh, uptodate);
	free_buffer_head(bh);
	unlock_buffer(real_bh);
	brelse(real_bh);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}
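
/*
 * gfs2_log_fake_buf() below builds a throw-away buffer head that shares
 * the page data of a real, pinned metadata buffer but is mapped to the
 * current log block, so the metadata can be written into the journal
 * without copying.  When the write completes, gfs2_fake_write_endio()
 * above frees the fake buffer head and drops the lock and reference that
 * were taken on the real buffer.
 */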

/**
 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
 * @sdp: the filesystem
 * @real: the buffer head holding the data to be written to the log
 *
 * Returns: the fake buffer head
 */
static struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
					     struct buffer_head *real)
{
	u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;

	bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&bh->b_count, 1);
	bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock);
	set_bh_page(bh, real->b_page, bh_offset(real));
	bh->b_blocknr = blkno;
	bh->b_size = sdp->sd_sb.sb_bsize;
	bh->b_bdev = sdp->sd_vfs->s_bdev;
	bh->b_private = real;
	bh->b_end_io = gfs2_fake_write_endio;

	gfs2_log_incr_head(sdp);
	atomic_inc(&sdp->sd_log_in_flight);

	return bh;
}

static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
{
	struct buffer_head *bh = gfs2_log_get_buf(sdp);
	struct gfs2_log_descriptor *ld = bh_log_desc(bh);

	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = 0;
	ld->ld_data1 = 0;
	ld->ld_data2 = 0;
	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
	return bh;
}

static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_meta_header *mh;
	struct gfs2_trans *tr;

	lock_buffer(bd->bd_bh);
	gfs2_log_lock(sdp);
	if (!list_empty(&bd->bd_list_tr))
		goto out;
	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_buf++;
	list_add(&bd->bd_list_tr, &tr->tr_list_buf);
	if (!list_empty(&le->le_list))
		goto out;
	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
	gfs2_meta_check(sdp, bd->bd_bh);
	gfs2_pin(sdp, bd->bd_bh);
	mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
	mh->__pad0 = cpu_to_be64(0);
	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	sdp->sd_log_num_buf++;
	list_add(&le->le_list, &sdp->sd_log_le_buf);
	tr->tr_num_buf_new++;
out:
	gfs2_log_unlock(sdp);
	unlock_buffer(bd->bd_bh);
}
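
/*
 * Metadata is committed to the journal in chunks of at most
 * buf_limit(sdp) buffers.  For each chunk, buf_lo_before_commit() first
 * writes a log descriptor block listing the real (in-place) block
 * numbers, then writes the pinned buffers themselves to the following
 * log blocks via fake buffer heads.
 */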

static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	unsigned int total;
	unsigned int limit;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	limit = buf_limit(sdp);
	/* for 4k blocks, limit = 503 */

	gfs2_log_lock(sdp);
	total = sdp->sd_log_num_buf;
	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA);
		gfs2_log_lock(sdp);
		ld = bh_log_desc(bh);
		ptr = bh_log_ptr(bh);
		ld->ld_length = cpu_to_be32(num + 1);
		ld->ld_data1 = cpu_to_be32(num);

		n = 0;
		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		submit_bh(WRITE_SYNC, bh);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);
			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			submit_bh(WRITE_SYNC, bh);
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}
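
/*
 * A revoke marks a block whose earlier copies in the journal must not be
 * replayed, typically because the block has since been freed or reused.
 * During recovery the journal is scanned twice: pass 0 collects the
 * revoked block numbers (revoke_lo_scan_elements), and pass 1 replays
 * metadata and data blocks, skipping anything found in the revoke table
 * via gfs2_revoke_check().
 */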

static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_trans *tr;

	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_revoke++;
	sdp->sd_log_num_revoke++;
	atomic_inc(&gl->gl_revokes);
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&le->le_list, &sdp->sd_log_le_revoke);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_meta_header *mh;
	struct buffer_head *bh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;

	if (!sdp->sd_log_num_revoke)
		return;

	bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE);
	ld = bh_log_desc(bh);
	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
						    sizeof(u64)));
	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_le.le_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			submit_bh(WRITE_SYNC, bh);

			bh = gfs2_log_get_buf(sdp);
			mh = (struct gfs2_meta_header *)bh->b_data;
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(bh->b_data + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	submit_bh(WRITE_SYNC, bh);
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		gl = bd->bd_gl;
		atomic_dec(&gl->gl_revokes);
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			}
			else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per meta data)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr = current->journal_info;
	struct address_space *mapping = bd->bd_bh->b_page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);

	lock_buffer(bd->bd_bh);
	gfs2_log_lock(sdp);
	if (tr) {
		if (!list_empty(&bd->bd_list_tr))
			goto out;
		tr->tr_touched = 1;
		if (gfs2_is_jdata(ip)) {
			tr->tr_num_buf++;
			list_add(&bd->bd_list_tr, &tr->tr_list_buf);
		}
	}
	if (!list_empty(&le->le_list))
		goto out;

	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
	if (gfs2_is_jdata(ip)) {
		gfs2_pin(sdp, bd->bd_bh);
		tr->tr_num_databuf_new++;
		sdp->sd_log_num_databuf++;
		list_add_tail(&le->le_list, &sdp->sd_log_le_databuf);
	} else {
		list_add_tail(&le->le_list, &sdp->sd_log_le_ordered);
	}
out:
	gfs2_log_unlock(sdp);
	unlock_buffer(bd->bd_bh);
}

static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr, KM_USER0);
}
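
/*
 * Journaled data blocks are described in the log descriptor by a pair of
 * __be64s: the block number and an "escaped" flag.  A data block whose
 * first four bytes equal GFS2_MAGIC is escaped, i.e. written to the log
 * with that magic number zeroed out so that replay cannot mistake it for
 * a piece of log metadata; databuf_lo_scan_elements() restores the magic
 * when it sees the flag set.
 */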

static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
			      struct list_head *list, struct list_head *done,
			      unsigned int n)
{
	struct buffer_head *bh1;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd;
	__be64 *ptr;

	if (!bh)
		return;

	ld = bh_log_desc(bh);
	ld->ld_length = cpu_to_be32(n + 1);
	ld->ld_data1 = cpu_to_be32(n);

	ptr = bh_log_ptr(bh);

	get_bh(bh);
	submit_bh(WRITE_SYNC, bh);
	gfs2_log_lock(sdp);
	while(!list_empty(list)) {
		bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
		list_move_tail(&bd->bd_le.le_list, done);
		get_bh(bd->bd_bh);
		while (be64_to_cpu(*ptr) != bd->bd_bh->b_blocknr) {
			gfs2_log_incr_head(sdp);
			ptr += 2;
		}
		gfs2_log_unlock(sdp);
		lock_buffer(bd->bd_bh);
		if (buffer_escaped(bd->bd_bh)) {
			void *kaddr;
			bh1 = gfs2_log_get_buf(sdp);
			kaddr = kmap_atomic(bd->bd_bh->b_page, KM_USER0);
			memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),
			       bh1->b_size);
			kunmap_atomic(kaddr, KM_USER0);
			*(__be32 *)bh1->b_data = 0;
			clear_buffer_escaped(bd->bd_bh);
			unlock_buffer(bd->bd_bh);
			brelse(bd->bd_bh);
		} else {
			bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
		}
		submit_bh(WRITE_SYNC, bh1);
		gfs2_log_lock(sdp);
		ptr += 2;
	}
	gfs2_log_unlock(sdp);
	brelse(bh);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: the filesystem
 *
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_bufdata *bd = NULL;
	struct buffer_head *bh = NULL;
	unsigned int n = 0;
	__be64 *ptr = NULL, *end = NULL;
	LIST_HEAD(processed);
	LIST_HEAD(in_progress);

	gfs2_log_lock(sdp);
	while (!list_empty(&sdp->sd_log_le_databuf)) {
		if (ptr == end) {
			gfs2_log_unlock(sdp);
			gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
			n = 0;
			bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_JDATA);
			ptr = bh_log_ptr(bh);
			end = bh_ptr_end(bh) - 1;
			gfs2_log_lock(sdp);
			continue;
		}
		bd = list_entry(sdp->sd_log_le_databuf.next, struct gfs2_bufdata, bd_le.le_list);
		list_move_tail(&bd->bd_le.le_list, &in_progress);
		gfs2_check_magic(bd->bd_bh);
		*ptr++ = cpu_to_be64(bd->bd_bh->b_blocknr);
		/* the escape flag describes the data buffer, not the descriptor */
		*ptr++ = cpu_to_be64(buffer_escaped(bd->bd_bh) ? 1 : 0);
		n++;
	}
	gfs2_log_unlock(sdp);
	gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
	gfs2_log_lock(sdp);
	list_splice(&processed, &sdp->sd_log_le_databuf);
	gfs2_log_unlock(sdp);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		sdp->sd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
}

const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_add = buf_lo_add,
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_add = revoke_lo_add,
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
	.lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_add = databuf_lo_add,
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_rg_lops,
	&gfs2_revoke_lops,
	NULL,
};